blob: e6a718bcb306ce0fa1a3de578141b6bd5ed29fe0 [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000010#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000011#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000012#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000013#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000014#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000015#include "SIInstrInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000016#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000018#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000020#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000021#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000022#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000023#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000025#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000026#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000028#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000029#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000033#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000037#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000039#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000040#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000043#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000044#include "llvm/Support/AMDGPUMetadata.h"
Scott Linder1e8c2c72018-06-21 19:38:56 +000045#include "llvm/Support/AMDHSAKernelDescriptor.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000047#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000048#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000049#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000050#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000051#include "llvm/Support/SMLoc.h"
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +000052#include "llvm/Support/TargetParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000053#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000054#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000055#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <cstring>
59#include <iterator>
60#include <map>
61#include <memory>
62#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000063
Tom Stellard45bb48e2015-06-13 03:28:10 +000064using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000065using namespace llvm::AMDGPU;
Scott Linder1e8c2c72018-06-21 19:38:56 +000066using namespace llvm::amdhsa;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
namespace {

// Forward declaration: AMDGPUOperand stores a back-pointer to its parser
// (see the AsmParser member below), so the full class is declared later.
class AMDGPUAsmParser;

// Coarse classification of a parsed register operand.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
77
// A single parsed assembly operand: a token, an immediate, a register, or a
// general expression. Exactly one union member (below) is active, selected
// by Kind.
class AMDGPUOperand : public MCParsedAsmOperand {
  // Discriminator for the Tok/Imm/Reg/Expr union.
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  // Source range of this operand in the assembly text.
  SMLoc StartLoc, EndLoc;
  // Non-owning back-pointer to the parser that created this operand;
  // presumably used by the out-of-line predicates (isInlinableImm etc.) —
  // their bodies are not visible in this chunk.
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  // Operands are passed around by owning pointer.
  using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000094
Sam Kolton945231a2016-06-10 09:57:59 +000095 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000096 bool Abs = false;
97 bool Neg = false;
98 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000099
100 bool hasFPModifiers() const { return Abs || Neg; }
101 bool hasIntModifiers() const { return Sext; }
102 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
103
104 int64_t getFPModifiersOperand() const {
105 int64_t Operand = 0;
106 Operand |= Abs ? SISrcMods::ABS : 0;
107 Operand |= Neg ? SISrcMods::NEG : 0;
108 return Operand;
109 }
110
111 int64_t getIntModifiersOperand() const {
112 int64_t Operand = 0;
113 Operand |= Sext ? SISrcMods::SEXT : 0;
114 return Operand;
115 }
116
117 int64_t getModifiersOperand() const {
118 assert(!(hasFPModifiers() && hasIntModifiers())
119 && "fp and int modifiers should not be used simultaneously");
120 if (hasFPModifiers()) {
121 return getFPModifiersOperand();
122 } else if (hasIntModifiers()) {
123 return getIntModifiersOperand();
124 } else {
125 return 0;
126 }
127 }
128
129 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
130 };
131
  // Tags for immediate "pseudo operands": named instruction modifiers parsed
  // into AMDGPUOperand::Imm.Type. ImmTyNone marks an ordinary value.
  // NOTE: enumerator values are implicit — do not reorder.
  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyLDS,
    // Buffer/flat addressing modifiers.
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    // Memory-operation flag modifiers.
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyD16,
    ImmTyClampSI,
    ImmTyOModSI,
    // DPP controls.
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    // SDWA selects.
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    // Image (MIMG) modifiers.
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128A16,
    ImmTyLWE,
    // Export instruction modifiers.
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyFORMAT,
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    // Interpolation operands.
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    // VOP3P operand-select / negate modifiers.
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyHigh
  };
179
  // Storage for a token operand: a non-owning view into the source buffer.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  // Storage for an immediate operand, its modifier bits, and the kind of
  // named modifier it represents (ImmTyNone for plain values).
  struct ImmOp {
    int64_t Val;
    ImmTy Type;
    bool IsFPImm;
    Modifiers Mods;
  };

  // Storage for a register operand.
  struct RegOp {
    unsigned RegNo;
    bool IsForcedVOP3;
    Modifiers Mods;
  };

  // Active member is selected by Kind (see KindTy above).
  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };
204
Tom Stellard45bb48e2015-06-13 03:28:10 +0000205 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000206 if (Kind == Token)
207 return true;
208
209 if (Kind != Expression || !Expr)
210 return false;
211
212 // When parsing operands, we can't always tell if something was meant to be
213 // a token, like 'gds', or an expression that references a global variable.
214 // In this case, we assume the string is an expression, and if we need to
215 // interpret is a token, then we treat the symbol name as the token.
216 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000217 }
218
  bool isImm() const override {
    return Kind == Immediate;
  }

  // Declared here, defined elsewhere in the file (not visible in this chunk).
  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;

  // True for any register operand, with or without modifiers.
  bool isRegKind() const {
    return Kind == Register;
  }

  // A "plain" register for the generic matcher: input modifiers
  // (abs/neg/sext) disqualify it.
  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }
233
  // A source that may carry input modifiers: any register, or an immediate
  // that is inlinable for the given value type.
  bool isRegOrImmWithInputMods(MVT type) const {
    return isRegKind() || isInlinableImm(type);
  }

  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(MVT::f64);
  }
261
Sam Kolton9772eb32017-01-11 11:46:30 +0000262 bool isVReg() const {
263 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
264 isRegClass(AMDGPU::VReg_64RegClassID) ||
265 isRegClass(AMDGPU::VReg_96RegClassID) ||
266 isRegClass(AMDGPU::VReg_128RegClassID) ||
267 isRegClass(AMDGPU::VReg_256RegClassID) ||
268 isRegClass(AMDGPU::VReg_512RegClassID);
269 }
270
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000271 bool isVReg32OrOff() const {
272 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
273 }
274
  // SDWA operand predicates; defined out of line (not visible in this chunk).
  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  // True iff this is an immediate tagged with the given modifier kind.
  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  // Any immediate that represents a named modifier rather than a plain value.
  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000288
  // One predicate per named-modifier kind; the offset variants additionally
  // range-check the value against the encodable bit width.
  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
  bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  // Either of the two VOP3 output modifiers (clamp / omod).
  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  // Defined out of line (not visible in this chunk).
  bool isRegClass(unsigned RCID) const;

  // Register of the given class, or an inlinable immediate of the given
  // type, in both cases without input modifiers.
  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }
345
  // SCSrc*: scalar register or inline constant (no literals, no modifiers).
  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  // SSrc*: like SCSrc but literal immediates (and for 32-bit, expressions)
  // are also accepted.
  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  // Never queried by the matcher; kept only to satisfy generated code.
  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  // Never queried by the matcher; kept only to satisfy generated code.
  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }
413
  // VCSrc*: VALU source — VGPR/SGPR register class or inline constant,
  // without modifiers.
  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  // VSrc*: like VCSrc but literal immediates (and for 32-bit, expressions)
  // are also accepted.
  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  // Never queried by the matcher; kept only to satisfy generated code.
  bool isVSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isVSrcB16();
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  // Never queried by the matcher; kept only to satisfy generated code.
  bool isVSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isVSrcF16();
  }

  // KImm*: literal constant carried in the instruction (no registers).
  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }
487
  // Memory operands are never produced by this parser.
  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  // SOPP branch targets may be given as a label (expression) or a literal.
  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  // Declared here, defined elsewhere in the file (not visible in this chunk).
  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000511
Tom Stellard89049702016-06-15 02:54:14 +0000512 StringRef getExpressionAsToken() const {
513 assert(isExpr());
514 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
515 return S->getSymbol().getName();
516 }
517
Sam Kolton945231a2016-06-10 09:57:59 +0000518 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000519 assert(isToken());
520
521 if (Kind == Expression)
522 return getExpressionAsToken();
523
Sam Kolton945231a2016-06-10 09:57:59 +0000524 return StringRef(Tok.Data, Tok.Length);
525 }
526
527 int64_t getImm() const {
528 assert(isImm());
529 return Imm.Val;
530 }
531
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000532 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000533 assert(isImm());
534 return Imm.Type;
535 }
536
  unsigned getReg() const override {
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  // Full source range of the operand, for diagnostics.
  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }
552
Sam Kolton945231a2016-06-10 09:57:59 +0000553 Modifiers getModifiers() const {
554 assert(isRegKind() || isImmTy(ImmTyNone));
555 return isRegKind() ? Reg.Mods : Imm.Mods;
556 }
557
558 void setModifiers(Modifiers Mods) {
559 assert(isRegKind() || isImmTy(ImmTyNone));
560 if (isRegKind())
561 Reg.Mods = Mods;
562 else
563 Imm.Mods = Mods;
564 }
565
566 bool hasModifiers() const {
567 return getModifiers().hasModifiers();
568 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000569
Sam Kolton945231a2016-06-10 09:57:59 +0000570 bool hasFPModifiers() const {
571 return getModifiers().hasFPModifiers();
572 }
573
574 bool hasIntModifiers() const {
575 return getModifiers().hasIntModifiers();
576 }
577
  // Defined out of line (not visible in this chunk).
  uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;

  template <unsigned Bitwidth>
  void addKImmFPOperands(MCInst &Inst, unsigned N) const;

  // Thin wrappers selecting the literal bit width.
  void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<16>(Inst, N);
  }

  void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<32>(Inst, N);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000596
597 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
598 if (isRegKind())
599 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000600 else if (isExpr())
601 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000602 else
603 addImmOperands(Inst, N);
604 }
605
606 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
607 Modifiers Mods = getModifiers();
608 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
609 if (isRegKind()) {
610 addRegOperands(Inst, N);
611 } else {
612 addImmOperands(Inst, N, false);
613 }
614 }
615
  // Same as above, but asserts the modifier family matches the operand slot.
  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }
625
Sam Kolton9772eb32017-01-11 11:46:30 +0000626 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
627 Modifiers Mods = getModifiers();
628 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
629 assert(isRegKind());
630 addRegOperands(Inst, N);
631 }
632
  // Same as above, but asserts the modifier family matches the operand slot.
  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
642
Sam Kolton945231a2016-06-10 09:57:59 +0000643 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
644 if (isImm())
645 addImmOperands(Inst, N);
646 else {
647 assert(isExpr());
648 Inst.addOperand(MCOperand::createExpr(Expr));
649 }
650 }
651
  // Pretty-print an ImmTy tag for debug output (used by print() below).
  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyFORMAT: OS << "FORMAT"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128A16: OS << "R128A16"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyAttrChan: OS << "AttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyHigh: OS << "High"; break;
    }
  }
701
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000702 void print(raw_ostream &OS) const override {
703 switch (Kind) {
704 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000705 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000706 break;
707 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000708 OS << '<' << getImm();
709 if (getImmTy() != ImmTyNone) {
710 OS << " type: "; printImmTy(OS, getImmTy());
711 }
Sam Kolton945231a2016-06-10 09:57:59 +0000712 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000713 break;
714 case Token:
715 OS << '\'' << getToken() << '\'';
716 break;
717 case Expression:
718 OS << "<expr " << *Expr << '>';
719 break;
720 }
721 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000722
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000723 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
724 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000725 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000726 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000727 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000728 Op->Imm.Val = Val;
729 Op->Imm.IsFPImm = IsFPImm;
730 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000731 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000732 Op->StartLoc = Loc;
733 Op->EndLoc = Loc;
734 return Op;
735 }
736
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000737 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
738 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000739 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000740 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000741 Res->Tok.Data = Str.data();
742 Res->Tok.Length = Str.size();
743 Res->StartLoc = Loc;
744 Res->EndLoc = Loc;
745 return Res;
746 }
747
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000748 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
749 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000750 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000751 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000752 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000753 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000754 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000755 Op->Reg.IsForcedVOP3 = ForceVOP3;
756 Op->StartLoc = S;
757 Op->EndLoc = E;
758 return Op;
759 }
760
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000761 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
762 const class MCExpr *Expr, SMLoc S) {
763 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000764 Op->Expr = Expr;
765 Op->StartLoc = S;
766 Op->EndLoc = S;
767 return Op;
768 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000769};
770
Sam Kolton945231a2016-06-10 09:57:59 +0000771raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
772 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
773 return OS;
774}
775
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000776//===----------------------------------------------------------------------===//
777// AsmParser
778//===----------------------------------------------------------------------===//
779
Artem Tamazova01cce82016-12-27 16:00:11 +0000780// Holds info related to the current kernel, e.g. count of SGPRs used.
781// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
782// .amdgpu_hsa_kernel or at EOF.
783class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000784 int SgprIndexUnusedMin = -1;
785 int VgprIndexUnusedMin = -1;
786 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000787
788 void usesSgprAt(int i) {
789 if (i >= SgprIndexUnusedMin) {
790 SgprIndexUnusedMin = ++i;
791 if (Ctx) {
792 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
793 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
794 }
795 }
796 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000797
Artem Tamazova01cce82016-12-27 16:00:11 +0000798 void usesVgprAt(int i) {
799 if (i >= VgprIndexUnusedMin) {
800 VgprIndexUnusedMin = ++i;
801 if (Ctx) {
802 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
803 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
804 }
805 }
806 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000807
Artem Tamazova01cce82016-12-27 16:00:11 +0000808public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000809 KernelScopeInfo() = default;
810
Artem Tamazova01cce82016-12-27 16:00:11 +0000811 void initialize(MCContext &Context) {
812 Ctx = &Context;
813 usesSgprAt(SgprIndexUnusedMin = -1);
814 usesVgprAt(VgprIndexUnusedMin = -1);
815 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000816
Artem Tamazova01cce82016-12-27 16:00:11 +0000817 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
818 switch (RegKind) {
819 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
820 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
821 default: break;
822 }
823 }
824};
825
Tom Stellard45bb48e2015-06-13 03:28:10 +0000826class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000827 MCAsmParser &Parser;
828
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +0000829 // Number of extra operands parsed after the first optional operand.
830 // This may be necessary to skip hardcoded mandatory operands.
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000831 static const unsigned MAX_OPR_LOOKAHEAD = 8;
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +0000832
Eugene Zelenko66203762017-01-21 00:53:49 +0000833 unsigned ForcedEncodingSize = 0;
834 bool ForcedDPP = false;
835 bool ForcedSDWA = false;
Artem Tamazova01cce82016-12-27 16:00:11 +0000836 KernelScopeInfo KernelScope;
Matt Arsenault68802d32015-11-05 03:11:27 +0000837
Tom Stellard45bb48e2015-06-13 03:28:10 +0000838 /// @name Auto-generated Match Functions
839 /// {
840
841#define GET_ASSEMBLER_HEADER
842#include "AMDGPUGenAsmMatcher.inc"
843
844 /// }
845
Tom Stellard347ac792015-06-26 21:15:07 +0000846private:
Artem Tamazov25478d82016-12-29 15:41:52 +0000847 bool ParseAsAbsoluteExpression(uint32_t &Ret);
Scott Linder1e8c2c72018-06-21 19:38:56 +0000848 bool OutOfRangeError(SMRange Range);
849 /// Calculate VGPR/SGPR blocks required for given target, reserved
850 /// registers, and user-specified NextFreeXGPR values.
851 ///
852 /// \param Features [in] Target features, used for bug corrections.
853 /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
854 /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
855 /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
856 /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
857 /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
858 /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
859 /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
860 /// \param VGPRBlocks [out] Result VGPR block count.
861 /// \param SGPRBlocks [out] Result SGPR block count.
862 bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
863 bool FlatScrUsed, bool XNACKUsed,
864 unsigned NextFreeVGPR, SMRange VGPRRange,
865 unsigned NextFreeSGPR, SMRange SGPRRange,
866 unsigned &VGPRBlocks, unsigned &SGPRBlocks);
867 bool ParseDirectiveAMDGCNTarget();
868 bool ParseDirectiveAMDHSAKernel();
Tom Stellard347ac792015-06-26 21:15:07 +0000869 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
870 bool ParseDirectiveHSACodeObjectVersion();
871 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000872 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
873 bool ParseDirectiveAMDKernelCodeT();
Matt Arsenault68802d32015-11-05 03:11:27 +0000874 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000875 bool ParseDirectiveAMDGPUHsaKernel();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +0000876
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +0000877 bool ParseDirectiveISAVersion();
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +0000878 bool ParseDirectiveHSAMetadata();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +0000879 bool ParseDirectivePALMetadata();
880
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000881 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
882 RegisterKind RegKind, unsigned Reg1,
883 unsigned RegNum);
884 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
885 unsigned& RegNum, unsigned& RegWidth,
886 unsigned *DwordRegIndex);
Scott Linder1e8c2c72018-06-21 19:38:56 +0000887 Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
888 void initializeGprCountSymbol(RegisterKind RegKind);
889 bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
890 unsigned RegWidth);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000891 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +0000892 bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000893 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
894 bool IsGdsHardcoded);
Tom Stellard347ac792015-06-26 21:15:07 +0000895
Tom Stellard45bb48e2015-06-13 03:28:10 +0000896public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000897 enum AMDGPUMatchResultTy {
898 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
899 };
900
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +0000901 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000902
Akira Hatanakab11ef082015-11-14 06:35:56 +0000903 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000904 const MCInstrInfo &MII,
905 const MCTargetOptions &Options)
Oliver Stannard4191b9e2017-10-11 09:17:43 +0000906 : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000907 MCAsmParserExtension::Initialize(Parser);
908
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +0000909 if (getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000910 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000911 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000912 }
913
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +0000914 setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
Artem Tamazov17091362016-06-14 15:03:59 +0000915
916 {
917 // TODO: make those pre-defined variables read-only.
918 // Currently there is none suitable machinery in the core llvm-mc for this.
919 // MCSymbol::isRedefinable is intended for another purpose, and
920 // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +0000921 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Artem Tamazov17091362016-06-14 15:03:59 +0000922 MCContext &Ctx = getContext();
Scott Linder1e8c2c72018-06-21 19:38:56 +0000923 if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
924 MCSymbol *Sym =
925 Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
926 Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
927 } else {
928 MCSymbol *Sym =
929 Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
930 Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
931 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
932 Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
933 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
934 Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
935 }
936 if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
937 initializeGprCountSymbol(IS_VGPR);
938 initializeGprCountSymbol(IS_SGPR);
939 } else
940 KernelScope.initialize(getContext());
Artem Tamazov17091362016-06-14 15:03:59 +0000941 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000942 }
943
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +0000944 bool hasXNACK() const {
945 return AMDGPU::hasXNACK(getSTI());
946 }
947
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +0000948 bool hasMIMG_R128() const {
949 return AMDGPU::hasMIMG_R128(getSTI());
950 }
951
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +0000952 bool hasPackedD16() const {
953 return AMDGPU::hasPackedD16(getSTI());
954 }
955
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000956 bool isSI() const {
957 return AMDGPU::isSI(getSTI());
958 }
959
960 bool isCI() const {
961 return AMDGPU::isCI(getSTI());
962 }
963
964 bool isVI() const {
965 return AMDGPU::isVI(getSTI());
966 }
967
Sam Koltonf7659d712017-05-23 10:08:55 +0000968 bool isGFX9() const {
969 return AMDGPU::isGFX9(getSTI());
970 }
971
Matt Arsenault26faed32016-12-05 22:26:17 +0000972 bool hasInv2PiInlineImm() const {
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +0000973 return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
Matt Arsenault26faed32016-12-05 22:26:17 +0000974 }
975
Matt Arsenaultfd023142017-06-12 15:55:58 +0000976 bool hasFlatOffsets() const {
977 return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
978 }
979
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000980 bool hasSGPR102_SGPR103() const {
981 return !isVI();
982 }
983
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +0000984 bool hasIntClamp() const {
985 return getFeatureBits()[AMDGPU::FeatureIntClamp];
986 }
987
Tom Stellard347ac792015-06-26 21:15:07 +0000988 AMDGPUTargetStreamer &getTargetStreamer() {
989 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
990 return static_cast<AMDGPUTargetStreamer &>(TS);
991 }
Matt Arsenault37fefd62016-06-10 02:18:02 +0000992
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000993 const MCRegisterInfo *getMRI() const {
994 // We need this const_cast because for some reason getContext() is not const
995 // in MCAsmParser.
996 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
997 }
998
999 const MCInstrInfo *getMII() const {
1000 return &MII;
1001 }
1002
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00001003 const FeatureBitset &getFeatureBits() const {
1004 return getSTI().getFeatureBits();
1005 }
1006
Sam Kolton05ef1c92016-06-03 10:27:37 +00001007 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
1008 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
1009 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +00001010
Sam Kolton05ef1c92016-06-03 10:27:37 +00001011 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
1012 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
1013 bool isForcedDPP() const { return ForcedDPP; }
1014 bool isForcedSDWA() const { return ForcedSDWA; }
Matt Arsenault5f45e782017-01-09 18:44:11 +00001015 ArrayRef<unsigned> getMatchedVariants() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001016
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001017 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +00001018 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
1019 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +00001020 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
1021 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001022 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1023 OperandVector &Operands, MCStreamer &Out,
1024 uint64_t &ErrorInfo,
1025 bool MatchingInlineAsm) override;
1026 bool ParseDirective(AsmToken DirectiveID) override;
1027 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +00001028 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001029 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
1030 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001031 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001032
Sam Kolton11de3702016-05-24 12:38:33 +00001033 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001034
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001035 OperandMatchResultTy
1036 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001037 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001038 bool (*ConvertResult)(int64_t &) = nullptr);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001039
1040 OperandMatchResultTy parseOperandArrayWithPrefix(
1041 const char *Prefix,
1042 OperandVector &Operands,
1043 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
1044 bool (*ConvertResult)(int64_t&) = nullptr);
1045
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001046 OperandMatchResultTy
1047 parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001048 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001049 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
1050 StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001051
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001052 bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
1053 OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001054 OperandMatchResultTy parseReg(OperandVector &Operands);
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001055 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001056 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
1057 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
1058 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
1059 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001060 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
Tim Renouf35484c92018-08-21 11:06:05 +00001061 OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +00001062
Tom Stellard45bb48e2015-06-13 03:28:10 +00001063 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
Artem Tamazov43b61562017-02-03 12:47:30 +00001064 void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
1065 void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001066 void cvtExp(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001067
1068 bool parseCnt(int64_t &IntVal);
1069 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001070 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001071
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001072private:
1073 struct OperandInfoTy {
1074 int64_t Id;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001075 bool IsSymbolic = false;
1076
1077 OperandInfoTy(int64_t Id_) : Id(Id_) {}
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001078 };
Sam Kolton11de3702016-05-24 12:38:33 +00001079
Artem Tamazov6edc1352016-05-26 17:00:33 +00001080 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
1081 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001082
1083 void errorExpTgt();
1084 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
1085
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00001086 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
1087 bool validateConstantBusLimitations(const MCInst &Inst);
1088 bool validateEarlyClobberLimitations(const MCInst &Inst);
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00001089 bool validateIntClampSupported(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001090 bool validateMIMGAtomicDMask(const MCInst &Inst);
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00001091 bool validateMIMGGatherDMask(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001092 bool validateMIMGDataSize(const MCInst &Inst);
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00001093 bool validateMIMGD16(const MCInst &Inst);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001094 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1095 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1096 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001097
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001098 bool trySkipId(const StringRef Id);
1099 bool trySkipToken(const AsmToken::TokenKind Kind);
1100 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1101 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
1102 bool parseExpr(int64_t &Imm);
1103
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001104public:
Sam Kolton11de3702016-05-24 12:38:33 +00001105 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00001106 OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001107
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001108 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001109 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Matt Arsenault0e8a2992016-12-15 20:40:20 +00001110 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1111 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001112 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
1113
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001114 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1115 const unsigned MinVal,
1116 const unsigned MaxVal,
1117 const StringRef ErrMsg);
1118 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1119 bool parseSwizzleOffset(int64_t &Imm);
1120 bool parseSwizzleMacro(int64_t &Imm);
1121 bool parseSwizzleQuadPerm(int64_t &Imm);
1122 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1123 bool parseSwizzleBroadcast(int64_t &Imm);
1124 bool parseSwizzleSwap(int64_t &Imm);
1125 bool parseSwizzleReverse(int64_t &Imm);
1126
Artem Tamazov8ce1f712016-05-19 12:22:39 +00001127 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
1128 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
1129 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00001130 void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
David Stuttard70e8bc12017-06-22 16:29:22 +00001131 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1132
Sam Kolton5f10a132016-05-06 11:31:17 +00001133 AMDGPUOperand::Ptr defaultGLC() const;
1134 AMDGPUOperand::Ptr defaultSLC() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001135
Artem Tamazov54bfd542016-10-31 16:07:39 +00001136 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1137 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001138 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenaultfd023142017-06-12 15:55:58 +00001139 AMDGPUOperand::Ptr defaultOffsetU12() const;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00001140 AMDGPUOperand::Ptr defaultOffsetS13() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001141
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001142 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1143
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001144 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1145 OptionalImmIndexMap &OptionalIdx);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00001146 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001147 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001148 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001149
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00001150 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1151
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001152 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1153 bool IsAtomic = false);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001154 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +00001155
Sam Kolton11de3702016-05-24 12:38:33 +00001156 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +00001157 AMDGPUOperand::Ptr defaultRowMask() const;
1158 AMDGPUOperand::Ptr defaultBankMask() const;
1159 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1160 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001161
Sam Kolton05ef1c92016-06-03 10:27:37 +00001162 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1163 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001164 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +00001165 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1166 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Koltonf7659d712017-05-23 10:08:55 +00001167 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +00001168 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1169 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Koltonf7659d712017-05-23 10:08:55 +00001170 uint64_t BasicInstType, bool skipVcc = false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001171};
1172
// Table entry describing one optional instruction operand.
struct OptionalOperand {
  const char *Name;                 // Operand keyword as written in assembly.
  AMDGPUOperand::ImmTy Type;        // Immediate-operand kind it maps to.
  bool IsBit;                       // Presumably: operand is a bare flag with
                                    // no explicit value — confirm at use site.
  bool (*ConvertResult)(int64_t&);  // Optional post-parse value fixup; may be
                                    // null when no conversion is needed.
};
1179
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001180} // end anonymous namespace
1181
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001182// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001183static const fltSemantics *getFltSemantics(unsigned Size) {
1184 switch (Size) {
1185 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001186 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001187 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001188 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001189 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001190 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001191 default:
1192 llvm_unreachable("unsupported fp type");
1193 }
1194}
1195
Matt Arsenault4bd72362016-12-10 00:39:12 +00001196static const fltSemantics *getFltSemantics(MVT VT) {
1197 return getFltSemantics(VT.getSizeInBits() / 8);
1198}
1199
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001200static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1201 switch (OperandType) {
1202 case AMDGPU::OPERAND_REG_IMM_INT32:
1203 case AMDGPU::OPERAND_REG_IMM_FP32:
1204 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1205 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1206 return &APFloat::IEEEsingle();
1207 case AMDGPU::OPERAND_REG_IMM_INT64:
1208 case AMDGPU::OPERAND_REG_IMM_FP64:
1209 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1210 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1211 return &APFloat::IEEEdouble();
1212 case AMDGPU::OPERAND_REG_IMM_INT16:
1213 case AMDGPU::OPERAND_REG_IMM_FP16:
1214 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1215 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1216 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1217 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1218 return &APFloat::IEEEhalf();
1219 default:
1220 llvm_unreachable("unsupported fp type");
1221 }
1222}
1223
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001224//===----------------------------------------------------------------------===//
1225// Operand
1226//===----------------------------------------------------------------------===//
1227
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001228static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1229 bool Lost;
1230
1231 // Convert literal to single precision
1232 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1233 APFloat::rmNearestTiesToEven,
1234 &Lost);
1235 // We allow precision lost but not overflow or underflow
1236 if (Status != APFloat::opOK &&
1237 Lost &&
1238 ((Status & APFloat::opOverflow) != 0 ||
1239 (Status & APFloat::opUnderflow) != 0)) {
1240 return false;
1241 }
1242
1243 return true;
1244}
1245
// Returns true if this immediate operand qualifies as an inline constant
// (via AMDGPU::isInlinableLiteral16/32/64) for an operand of the given MVT.
// FP-literal tokens are first converted to the operand's semantics; integer
// tokens are truncated to the operand width before the check.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      // 64-bit operands take the stored bits directly.
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operands: reinterpret the token as a double and convert it to
    // the operand's semantics; reject if the conversion over/underflows.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // 16-bit operands: sign-extend the low 16 bits of the literal.
  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // Default: treat as a 32-bit literal (low 32 bits, zero-extended).
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1295
1296bool AMDGPUOperand::isLiteralImm(MVT type) const {
Hiroshi Inoue7f46baf2017-07-16 08:11:56 +00001297 // Check that this immediate can be added as literal
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001298 if (!isImmTy(ImmTyNone)) {
1299 return false;
1300 }
1301
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001302 if (!Imm.IsFPImm) {
1303 // We got int literal token.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001304
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001305 if (type == MVT::f64 && hasFPModifiers()) {
1306 // Cannot apply fp modifiers to int literals preserving the same semantics
1307 // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
1308 // disable these cases.
1309 return false;
1310 }
1311
Matt Arsenault4bd72362016-12-10 00:39:12 +00001312 unsigned Size = type.getSizeInBits();
1313 if (Size == 64)
1314 Size = 32;
1315
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001316 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1317 // types.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001318 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001319 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001320
1321 // We got fp literal token
1322 if (type == MVT::f64) { // Expected 64-bit fp operand
1323 // We would set low 64-bits of literal to zeroes but we accept this literals
1324 return true;
1325 }
1326
1327 if (type == MVT::i64) { // Expected 64-bit int operand
1328 // We don't allow fp literals in 64-bit integer instructions. It is
1329 // unclear how we should encode them.
1330 return false;
1331 }
1332
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001333 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001334 return canLosslesslyConvertToFPType(FPLiteral, type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001335}
1336
1337bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001338 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001339}
1340
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001341bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001342 if (AsmParser->isVI())
1343 return isVReg();
1344 else if (AsmParser->isGFX9())
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001345 return isRegKind() || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001346 else
1347 return false;
1348}
1349
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001350bool AMDGPUOperand::isSDWAFP16Operand() const {
1351 return isSDWAOperand(MVT::f16);
1352}
1353
1354bool AMDGPUOperand::isSDWAFP32Operand() const {
1355 return isSDWAOperand(MVT::f32);
1356}
1357
1358bool AMDGPUOperand::isSDWAInt16Operand() const {
1359 return isSDWAOperand(MVT::i16);
1360}
1361
1362bool AMDGPUOperand::isSDWAInt32Operand() const {
1363 return isSDWAOperand(MVT::i32);
1364}
1365
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001366uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1367{
1368 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1369 assert(Size == 2 || Size == 4 || Size == 8);
1370
1371 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1372
1373 if (Imm.Mods.Abs) {
1374 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001375 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001376 if (Imm.Mods.Neg) {
1377 Val ^= FpSignMask;
1378 }
1379
1380 return Val;
1381}
1382
1383void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001384 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1385 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001386 addLiteralImmOperand(Inst, Imm.Val,
1387 ApplyModifiers &
1388 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001389 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001390 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1391 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001392 }
1393}
1394
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  // Encode Val as the literal/inline-constant source operand of Inst,
  // following the encoding rules of the operand's declared type: inline
  // constants are emitted as-is, other values are truncated, shifted or
  // replicated as required by the operand width.
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    // abs/neg are folded into the raw bit pattern before encoding. FP
    // literal tokens always carry a 64-bit double payload.
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of the double survive the encoding.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
          OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
        // Packed operand: replicate the 16-bit value into both halves.
        ImmVal |= (ImmVal << 16);
      }

      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    // Packed 16-bit inline constant: must already be inlinable (checked by
    // the predicate methods); replicate into both halves.
    auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
    assert(AMDGPU::isInlinableLiteral16(LiteralVal,
                                        AsmParser->hasInv2PiInlineImm()));

    uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
                      static_cast<uint32_t>(LiteralVal);
    Inst.addOperand(MCOperand::createImm(ImmVal));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1533
Matt Arsenault4bd72362016-12-10 00:39:12 +00001534template <unsigned Bitwidth>
1535void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001536 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001537
1538 if (!Imm.IsFPImm) {
1539 // We got int literal token.
1540 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1541 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001542 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001543
1544 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001545 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001546 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1547 APFloat::rmNearestTiesToEven, &Lost);
1548 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001549}
1550
1551void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1552 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1553}
1554
1555//===----------------------------------------------------------------------===//
1556// AsmParser
1557//===----------------------------------------------------------------------===//
1558
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001559static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1560 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001561 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001562 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001563 case 1: return AMDGPU::VGPR_32RegClassID;
1564 case 2: return AMDGPU::VReg_64RegClassID;
1565 case 3: return AMDGPU::VReg_96RegClassID;
1566 case 4: return AMDGPU::VReg_128RegClassID;
1567 case 8: return AMDGPU::VReg_256RegClassID;
1568 case 16: return AMDGPU::VReg_512RegClassID;
1569 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001570 } else if (Is == IS_TTMP) {
1571 switch (RegWidth) {
1572 default: return -1;
1573 case 1: return AMDGPU::TTMP_32RegClassID;
1574 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001575 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001576 case 8: return AMDGPU::TTMP_256RegClassID;
1577 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001578 }
1579 } else if (Is == IS_SGPR) {
1580 switch (RegWidth) {
1581 default: return -1;
1582 case 1: return AMDGPU::SGPR_32RegClassID;
1583 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001584 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001585 case 8: return AMDGPU::SGPR_256RegClassID;
1586 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001587 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001588 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001589 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001590}
1591
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001592static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001593 return StringSwitch<unsigned>(RegName)
1594 .Case("exec", AMDGPU::EXEC)
1595 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001596 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001597 .Case("xnack_mask", AMDGPU::XNACK_MASK)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001598 .Case("m0", AMDGPU::M0)
1599 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001600 .Case("tba", AMDGPU::TBA)
1601 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001602 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1603 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001604 .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
1605 .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001606 .Case("vcc_lo", AMDGPU::VCC_LO)
1607 .Case("vcc_hi", AMDGPU::VCC_HI)
1608 .Case("exec_lo", AMDGPU::EXEC_LO)
1609 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001610 .Case("tma_lo", AMDGPU::TMA_LO)
1611 .Case("tma_hi", AMDGPU::TMA_HI)
1612 .Case("tba_lo", AMDGPU::TBA_LO)
1613 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001614 .Default(0);
1615}
1616
Eugene Zelenko66203762017-01-21 00:53:49 +00001617bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1618 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001619 auto R = parseRegister();
1620 if (!R) return true;
1621 assert(R->isReg());
1622 RegNo = R->getReg();
1623 StartLoc = R->getStartLoc();
1624 EndLoc = R->getEndLoc();
1625 return false;
1626}
1627
Eugene Zelenko66203762017-01-21 00:53:49 +00001628bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1629 RegisterKind RegKind, unsigned Reg1,
1630 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001631 switch (RegKind) {
1632 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001633 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1634 Reg = AMDGPU::EXEC;
1635 RegWidth = 2;
1636 return true;
1637 }
1638 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1639 Reg = AMDGPU::FLAT_SCR;
1640 RegWidth = 2;
1641 return true;
1642 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001643 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1644 Reg = AMDGPU::XNACK_MASK;
1645 RegWidth = 2;
1646 return true;
1647 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001648 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1649 Reg = AMDGPU::VCC;
1650 RegWidth = 2;
1651 return true;
1652 }
1653 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1654 Reg = AMDGPU::TBA;
1655 RegWidth = 2;
1656 return true;
1657 }
1658 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1659 Reg = AMDGPU::TMA;
1660 RegWidth = 2;
1661 return true;
1662 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001663 return false;
1664 case IS_VGPR:
1665 case IS_SGPR:
1666 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001667 if (Reg1 != Reg + RegWidth) {
1668 return false;
1669 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001670 RegWidth++;
1671 return true;
1672 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001673 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001674 }
1675}
1676
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  // Parse a single register reference in any supported syntax:
  //   - special names (vcc, exec, flat_scratch, ...),
  //   - single registers vN / sN / ttmpN,
  //   - ranges v[X:Y] (":Y" optional, e.g. v[X]),
  //   - lists of consecutive single registers, e.g. [s0,s1,s2,s3].
  // On success: RegKind/Reg/RegNum/RegWidth describe the register, and, if
  // DwordRegIndex is non-null, it receives the first register's dword index.
  // Returns false on any parse or validation error.
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Determine the register file from the name prefix, then the offset
      // at which the numeric part (if any) begins.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow the lower
        // bound.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    // Parse the first element, then repeatedly consume "," followed by
    // another single register until "]".
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        // Each element must be a single register of the same kind and
        // consecutive with the registers accumulated so far.
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve the parsed (kind, number, width) triple to an actual MC
  // register and validate alignment and class membership.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    // RegNum becomes an index into the register class below.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Finally reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1805
Scott Linder1e8c2c72018-06-21 19:38:56 +00001806Optional<StringRef>
1807AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1808 switch (RegKind) {
1809 case IS_VGPR:
1810 return StringRef(".amdgcn.next_free_vgpr");
1811 case IS_SGPR:
1812 return StringRef(".amdgcn.next_free_sgpr");
1813 default:
1814 return None;
1815 }
1816}
1817
1818void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1819 auto SymbolName = getGprCountSymbolName(RegKind);
1820 assert(SymbolName && "initializing invalid register kind");
1821 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1822 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1823}
1824
bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
                                            unsigned DwordRegIndex,
                                            unsigned RegWidth) {
  // Raise the .amdgcn.next_free_{v,s}gpr tracking symbol so it stays one
  // past the highest register dword used so far. Returns true on success;
  // false means a diagnostic has already been emitted via Error().
  // Symbols are only defined for GCN targets
  if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
    return true;

  auto SymbolName = getGprCountSymbolName(RegKind);
  if (!SymbolName)
    // This register kind is not tracked (e.g. TTMP or special registers).
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);

  // Highest dword index touched by this register reference.
  int64_t NewMax = DwordRegIndex + RegWidth - 1;
  int64_t OldCount;

  // Error() always returns true, so '!Error(...)' emits the diagnostic and
  // yields false (failure) to the caller.
  if (!Sym->isVariable())
    return !Error(getParser().getTok().getLoc(),
                  ".amdgcn.next_free_{v,s}gpr symbols must be variable");
  if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
    return !Error(
        getParser().getTok().getLoc(),
        ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");

  if (OldCount <= NewMax)
    Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));

  return true;
}
1853
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001854std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001855 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001856 SMLoc StartLoc = Tok.getLoc();
1857 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001858 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001859 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001860
Artem Tamazova01cce82016-12-27 16:00:11 +00001861 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001862 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001863 }
Scott Linder1e8c2c72018-06-21 19:38:56 +00001864 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
1865 if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
1866 return nullptr;
1867 } else
1868 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001869 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001870}
1871
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001872bool
1873AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1874 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1875 (getLexer().getKind() == AsmToken::Integer ||
1876 getLexer().getKind() == AsmToken::Real)) {
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001877 // This is a workaround for handling operands like these:
1878 // |1.0|
1879 // |-1|
1880 // This syntax is not compatible with syntax of standard
1881 // MC expressions (due to the trailing '|').
1882
1883 SMLoc EndLoc;
1884 const MCExpr *Expr;
1885
1886 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1887 return true;
1888 }
1889
1890 return !Expr->evaluateAsAbsolute(Val);
1891 }
1892
1893 return getParser().parseAbsoluteExpression(Val);
1894}
1895
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
  // Parse an integer or floating-point literal (with optional leading '-')
  // into Operands. AbsMod signals that we are inside |...| so a trailing
  // pipe terminates the expression (see parseAbsoluteExpr).
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();
    // Consume '-' as part of a literal only when a number follows;
    // otherwise leave the token for other parsers.
    if (!NextToken.is(AsmToken::Integer) &&
        !NextToken.is(AsmToken::Real)) {
      return MatchOperand_NoMatch;
    }
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;

    // The lexer delivers FP literals as their 64-bit double bit pattern;
    // negate via APFloat so sign handling matches FP semantics, then store
    // the bit pattern back into the operand (IsFPImm = true).
    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}
1938
Alex Bradbury58eba092016-11-01 16:32:05 +00001939OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001940AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001941 if (auto R = parseRegister()) {
1942 assert(R->isReg());
1943 R->Reg.IsForcedVOP3 = isForcedVOP3();
1944 Operands.push_back(std::move(R));
1945 return MatchOperand_Success;
1946 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001947 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001948}
1949
Alex Bradbury58eba092016-11-01 16:32:05 +00001950OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001951AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1952 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001953 if (res != MatchOperand_NoMatch) {
1954 return res;
1955 }
1956
1957 return parseReg(Operands);
1958}
1959
// Parse a register or immediate operand optionally wrapped in floating-point
// input modifiers. Accepted spellings: a leading '-' (NEG), 'neg(...)',
// 'abs(...)', and '|...|' (ABS). On success the operand is pushed onto
// Operands with the matching Modifiers bits set; on malformed modifier syntax
// a diagnostic is emitted and MatchOperand_ParseFail is returned.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  // Negate: leading '-'; Negate2: 'neg(...)'; Abs: '|...|'; Abs2: 'abs(...)'.
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Besides being counter-intuitive, such use of the floating-point NEG
    // modifier would give integer literals different meanings in VOP1/2/C
    // versus VOP3, for example:
    //    v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
    //    v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
    // Negative fp literals are handled likewise for uniformity, so the '-'
    // token is left in place (NOT consumed) for numeric literals.
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  // 'neg(' — mutually exclusive with a consumed leading '-'.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // 'abs(' form of the ABS modifier.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // '|' form of the ABS modifier — cannot be nested inside 'abs(...)'.
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    // Abs is forwarded so the immediate parser knows a '|' context is open.
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume the closing delimiters and record the modifier bits.
  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  // Attach the accumulated modifiers to the operand just parsed.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
2068
// Parse a register or immediate operand optionally wrapped in the integer
// input modifier 'sext(...)'. On success the operand is pushed onto Operands
// with Mods.Sext set; malformed modifier syntax produces a diagnostic and
// MatchOperand_ParseFail.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = false;

  // Optional 'sext(' prefix.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume the closing ')' and record the modifier.
  AMDGPUOperand::Modifiers Mods;
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the modifier to the operand just parsed.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002112
// Parse a register operand that may carry floating-point input modifiers
// (neg/abs); immediates are not accepted here (AllowImm == false).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, false);
}
2117
// Parse a register operand that may carry the integer input modifier
// sext(...); immediates are not accepted here (AllowImm == false).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
2122
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002123OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
2124 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2125 if (Reg) {
2126 Operands.push_back(std::move(Reg));
2127 return MatchOperand_Success;
2128 }
2129
2130 const AsmToken &Tok = Parser.getTok();
2131 if (Tok.getString() == "off") {
2132 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
2133 AMDGPUOperand::ImmTyOff, false));
2134 Parser.Lex();
2135 return MatchOperand_Success;
2136 }
2137
2138 return MatchOperand_NoMatch;
2139}
2140
// Extra validation run after the generated matcher selects an instruction:
// rejects matches that disagree with a user-forced encoding (_e32/_e64,
// DPP, SDWA mnemonic suffixes) and enforces a few per-opcode restrictions.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // Reject the match if the encoding of the selected instruction does not
  // agree with the encoding the user forced via the mnemonic suffix.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some VOP3 opcodes prefer their 32-bit form unless _e64 was explicit.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    // NOTE(review): OpNum is used unguarded — assumes these SDWA opcodes
    // always carry a dst_sel operand; confirm against the .td definitions.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  // Targets without FLAT offset support only accept offset == 0.
  if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
    // FIXME: Produces error without correct column reported.
    // NOTE(review): assumes every FLAT instruction has an immediate offset
    // operand (OpNum and isImm are unchecked) — confirm.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
    const auto &Op = Inst.getOperand(OpNum);
    if (Op.getImm() != 0)
      return Match_InvalidOperand;
  }

  return Match_Success;
}
2177
Matt Arsenault5f45e782017-01-09 18:44:11 +00002178// What asm variants we should check
2179ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2180 if (getForcedEncodingSize() == 32) {
2181 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2182 return makeArrayRef(Variants);
2183 }
2184
2185 if (isForcedVOP3()) {
2186 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2187 return makeArrayRef(Variants);
2188 }
2189
2190 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002191 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2192 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002193 return makeArrayRef(Variants);
2194 }
2195
2196 if (isForcedDPP()) {
2197 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2198 return makeArrayRef(Variants);
2199 }
2200
2201 static const unsigned Variants[] = {
2202 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002203 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002204 };
2205
2206 return makeArrayRef(Variants);
2207}
2208
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002209unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2210 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2211 const unsigned Num = Desc.getNumImplicitUses();
2212 for (unsigned i = 0; i < Num; ++i) {
2213 unsigned Reg = Desc.ImplicitUses[i];
2214 switch (Reg) {
2215 case AMDGPU::FLAT_SCR:
2216 case AMDGPU::VCC:
2217 case AMDGPU::M0:
2218 return Reg;
2219 default:
2220 break;
2221 }
2222 }
2223 return AMDGPU::NoRegister;
2224}
2225
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002226// NB: This code is correct only when used to check constant
2227// bus limitations because GFX7 support no f16 inline constants.
2228// Note that there are no cases when a GFX7 opcode violates
2229// constant bus limitations due to the use of an f16 constant.
2230bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2231 unsigned OpIdx) const {
2232 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2233
2234 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2235 return false;
2236 }
2237
2238 const MCOperand &MO = Inst.getOperand(OpIdx);
2239
2240 int64_t Val = MO.getImm();
2241 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2242
2243 switch (OpSize) { // expected operand size
2244 case 8:
2245 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2246 case 4:
2247 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2248 case 2: {
2249 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2250 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2251 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2252 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2253 } else {
2254 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2255 }
2256 }
2257 default:
2258 llvm_unreachable("invalid operand size");
2259 }
2260}
2261
2262bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2263 const MCOperand &MO = Inst.getOperand(OpIdx);
2264 if (MO.isImm()) {
2265 return !isInlineConstant(Inst, OpIdx);
2266 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002267 return !MO.isReg() ||
2268 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002269}
2270
// Check that a VALU instruction reads at most one value over the scalar
// constant bus (counting special implicit SGPRs, literal/expression
// immediates, and explicit SGPR sources — repeated reads of the same SGPR
// count once). Returns false when the limit is exceeded.
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VALU encodings are subject to the constant bus limit.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // An implicit read of VCC/FLAT_SCR/M0 also occupies the bus.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersections like these
          //   s0, s[0:1]
          //   flat_scratch_lo, flat_scratch
          //   flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          // Reading the same SGPR again is free; track the last one seen.
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2323
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002324bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002325 const unsigned Opcode = Inst.getOpcode();
2326 const MCInstrDesc &Desc = MII.get(Opcode);
2327
2328 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2329 if (DstIdx == -1 ||
2330 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2331 return true;
2332 }
2333
2334 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2335
2336 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2337 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2338 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2339
2340 assert(DstIdx != -1);
2341 const MCOperand &Dst = Inst.getOperand(DstIdx);
2342 assert(Dst.isReg());
2343 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2344
2345 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2346
2347 for (int SrcIdx : SrcIndices) {
2348 if (SrcIdx == -1) break;
2349 const MCOperand &Src = Inst.getOperand(SrcIdx);
2350 if (Src.isReg()) {
2351 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2352 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2353 return false;
2354 }
2355 }
2356 }
2357
2358 return true;
2359}
2360
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002361bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2362
2363 const unsigned Opc = Inst.getOpcode();
2364 const MCInstrDesc &Desc = MII.get(Opc);
2365
2366 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2367 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2368 assert(ClampIdx != -1);
2369 return Inst.getOperand(ClampIdx).getImm() == 0;
2370 }
2371
2372 return true;
2373}
2374
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002375bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
2376
2377 const unsigned Opc = Inst.getOpcode();
2378 const MCInstrDesc &Desc = MII.get(Opc);
2379
2380 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2381 return true;
2382
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002383 int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
2384 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2385 int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
2386
2387 assert(VDataIdx != -1);
2388 assert(DMaskIdx != -1);
2389 assert(TFEIdx != -1);
2390
2391 unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
2392 unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
2393 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2394 if (DMask == 0)
2395 DMask = 1;
2396
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002397 unsigned DataSize =
2398 (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
2399 if (hasPackedD16()) {
2400 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2401 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
2402 DataSize = (DataSize + 1) / 2;
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002403 }
2404
2405 return (VDataSize / 4) == DataSize + TFESize;
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002406}
2407
2408bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2409
2410 const unsigned Opc = Inst.getOpcode();
2411 const MCInstrDesc &Desc = MII.get(Opc);
2412
2413 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2414 return true;
2415 if (!Desc.mayLoad() || !Desc.mayStore())
2416 return true; // Not atomic
2417
2418 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2419 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2420
2421 // This is an incomplete check because image_atomic_cmpswap
2422 // may only use 0x3 and 0xf while other atomic operations
2423 // may use 0x1 and 0x3. However these limitations are
2424 // verified when we check that dmask matches dst size.
2425 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2426}
2427
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002428bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2429
2430 const unsigned Opc = Inst.getOpcode();
2431 const MCInstrDesc &Desc = MII.get(Opc);
2432
2433 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2434 return true;
2435
2436 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2437 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2438
2439 // GATHER4 instructions use dmask in a different fashion compared to
2440 // other MIMG instructions. The only useful DMASK values are
2441 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2442 // (red,red,red,red) etc.) The ISA document doesn't mention
2443 // this.
2444 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2445}
2446
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002447bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2448
2449 const unsigned Opc = Inst.getOpcode();
2450 const MCInstrDesc &Desc = MII.get(Opc);
2451
2452 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2453 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002454
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002455 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2456 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2457 if (isCI() || isSI())
2458 return false;
2459 }
2460
2461 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002462}
2463
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002464bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2465 const SMLoc &IDLoc) {
2466 if (!validateConstantBusLimitations(Inst)) {
2467 Error(IDLoc,
2468 "invalid operand (violates constant bus restrictions)");
2469 return false;
2470 }
2471 if (!validateEarlyClobberLimitations(Inst)) {
2472 Error(IDLoc,
2473 "destination must be different than all sources");
2474 return false;
2475 }
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002476 if (!validateIntClampSupported(Inst)) {
2477 Error(IDLoc,
2478 "integer clamping is not supported on this GPU");
2479 return false;
2480 }
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002481 // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
2482 if (!validateMIMGD16(Inst)) {
2483 Error(IDLoc,
2484 "d16 modifier is not supported on this GPU");
2485 return false;
2486 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002487 if (!validateMIMGDataSize(Inst)) {
2488 Error(IDLoc,
2489 "image data size does not match dmask and tfe");
2490 return false;
2491 }
2492 if (!validateMIMGAtomicDMask(Inst)) {
2493 Error(IDLoc,
2494 "invalid atomic image dmask");
2495 return false;
2496 }
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002497 if (!validateMIMGGatherDMask(Inst)) {
2498 Error(IDLoc,
2499 "invalid image_gather dmask: only one bit must be set");
2500 return false;
2501 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002502
2503 return true;
2504}
2505
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00002506static std::string AMDGPUMnemonicSpellCheck(StringRef S, uint64_t FBS,
2507 unsigned VariantID = 0);
2508
// Match the parsed operands against every candidate asm variant, run the
// target validators on a successful match, and emit the instruction.
// Returns true (with a diagnostic) on failure.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // Keep the MOST SPECIFIC failure status seen across variants so the
    // best diagnostic is reported. The ordering, least to most specific:
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    // Each arm below accepts R only if it outranks the current Result.
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail && Result != Match_InvalidOperand
                                 && Result != Match_MissingFeature
                                 && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // First fully successful variant wins.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // The match is syntactically valid; run semantic validators before emit.
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    // Offer a spelling suggestion based on the available-feature set.
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2578
Artem Tamazov25478d82016-12-29 15:41:52 +00002579bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2580 int64_t Tmp = -1;
2581 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2582 return true;
2583 }
2584 if (getParser().parseAbsoluteExpression(Tmp)) {
2585 return true;
2586 }
2587 Ret = static_cast<uint32_t>(Tmp);
2588 return false;
2589}
2590
// Parse a "<major>, <minor>" version pair used by several HSA directives.
// Emits a TokError and returns true on malformed input.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
2605
Scott Linder1e8c2c72018-06-21 19:38:56 +00002606bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
2607 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2608 return TokError("directive only supported for amdgcn architecture");
2609
2610 std::string Target;
2611
2612 SMLoc TargetStart = getTok().getLoc();
2613 if (getParser().parseEscapedString(Target))
2614 return true;
2615 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
2616
2617 std::string ExpectedTarget;
2618 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
2619 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
2620
2621 if (Target != ExpectedTargetOS.str())
2622 return getParser().Error(TargetRange.Start, "target must match options",
2623 TargetRange);
2624
2625 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
2626 return false;
2627}
2628
// Report a "value out of range" diagnostic covering Range. Always returns
// true so callers can write 'return OutOfRangeError(...)'.
bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return getParser().Error(Range.Start, "value out of range", Range);
}
2632
// Convert next-free VGPR/SGPR numbers from .amdhsa_ directives into the
// granulated register-block counts stored in the kernel descriptor, adding
// the extra SGPRs implied by VCC/flat-scratch/XNACK usage and range-checking
// against the subtarget's addressable SGPR limit. Returns true (with a
// diagnostic at the relevant source range) on out-of-range values.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
    unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
    unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;
  unsigned MaxAddressableNumSGPRs = IsaInfo::getAddressableNumSGPRs(&getSTI());

  // On gfx8+ without the SGPR-init bug, the limit applies before the extra
  // (VCC/flat-scratch/XNACK) SGPRs are added.
  if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  NumSGPRs +=
      IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

  // On gfx7 and earlier (or with the SGPR-init bug) the limit applies after
  // the extra SGPRs are included.
  if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  // The SGPR-init hardware bug requires reporting a fixed SGPR count.
  if (Features.test(FeatureSGPRInitBug))
    NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  VGPRBlocks = IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
2665
2666bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
2667 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2668 return TokError("directive only supported for amdgcn architecture");
2669
2670 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
2671 return TokError("directive only supported for amdhsa OS");
2672
2673 StringRef KernelName;
2674 if (getParser().parseIdentifier(KernelName))
2675 return true;
2676
2677 kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor();
2678
2679 StringSet<> Seen;
2680
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00002681 IsaVersion IVersion = getIsaVersion(getSTI().getCPU());
Scott Linder1e8c2c72018-06-21 19:38:56 +00002682
2683 SMRange VGPRRange;
2684 uint64_t NextFreeVGPR = 0;
2685 SMRange SGPRRange;
2686 uint64_t NextFreeSGPR = 0;
2687 unsigned UserSGPRCount = 0;
2688 bool ReserveVCC = true;
2689 bool ReserveFlatScr = true;
2690 bool ReserveXNACK = hasXNACK();
2691
2692 while (true) {
2693 while (getLexer().is(AsmToken::EndOfStatement))
2694 Lex();
2695
2696 if (getLexer().isNot(AsmToken::Identifier))
2697 return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");
2698
2699 StringRef ID = getTok().getIdentifier();
2700 SMRange IDRange = getTok().getLocRange();
2701 Lex();
2702
2703 if (ID == ".end_amdhsa_kernel")
2704 break;
2705
2706 if (Seen.find(ID) != Seen.end())
2707 return TokError(".amdhsa_ directives cannot be repeated");
2708 Seen.insert(ID);
2709
2710 SMLoc ValStart = getTok().getLoc();
2711 int64_t IVal;
2712 if (getParser().parseAbsoluteExpression(IVal))
2713 return true;
2714 SMLoc ValEnd = getTok().getLoc();
2715 SMRange ValRange = SMRange(ValStart, ValEnd);
2716
2717 if (IVal < 0)
2718 return OutOfRangeError(ValRange);
2719
2720 uint64_t Val = IVal;
2721
2722#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
2723 if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
2724 return OutOfRangeError(RANGE); \
2725 AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);
2726
2727 if (ID == ".amdhsa_group_segment_fixed_size") {
2728 if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
2729 return OutOfRangeError(ValRange);
2730 KD.group_segment_fixed_size = Val;
2731 } else if (ID == ".amdhsa_private_segment_fixed_size") {
2732 if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
2733 return OutOfRangeError(ValRange);
2734 KD.private_segment_fixed_size = Val;
2735 } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
2736 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2737 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
2738 Val, ValRange);
2739 UserSGPRCount++;
2740 } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
2741 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2742 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
2743 ValRange);
2744 UserSGPRCount++;
2745 } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
2746 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2747 KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
2748 ValRange);
2749 UserSGPRCount++;
2750 } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
2751 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2752 KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
2753 Val, ValRange);
2754 UserSGPRCount++;
2755 } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
2756 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2757 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
2758 ValRange);
2759 UserSGPRCount++;
2760 } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
2761 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2762 KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
2763 ValRange);
2764 UserSGPRCount++;
2765 } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
2766 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2767 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
2768 Val, ValRange);
2769 UserSGPRCount++;
2770 } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
2771 PARSE_BITS_ENTRY(
2772 KD.compute_pgm_rsrc2,
2773 COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
2774 ValRange);
2775 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
2776 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2777 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
2778 ValRange);
2779 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
2780 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2781 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
2782 ValRange);
2783 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
2784 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2785 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
2786 ValRange);
2787 } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
2788 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2789 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
2790 ValRange);
2791 } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
2792 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2793 COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
2794 ValRange);
2795 } else if (ID == ".amdhsa_next_free_vgpr") {
2796 VGPRRange = ValRange;
2797 NextFreeVGPR = Val;
2798 } else if (ID == ".amdhsa_next_free_sgpr") {
2799 SGPRRange = ValRange;
2800 NextFreeSGPR = Val;
2801 } else if (ID == ".amdhsa_reserve_vcc") {
2802 if (!isUInt<1>(Val))
2803 return OutOfRangeError(ValRange);
2804 ReserveVCC = Val;
2805 } else if (ID == ".amdhsa_reserve_flat_scratch") {
2806 if (IVersion.Major < 7)
2807 return getParser().Error(IDRange.Start, "directive requires gfx7+",
2808 IDRange);
2809 if (!isUInt<1>(Val))
2810 return OutOfRangeError(ValRange);
2811 ReserveFlatScr = Val;
2812 } else if (ID == ".amdhsa_reserve_xnack_mask") {
2813 if (IVersion.Major < 8)
2814 return getParser().Error(IDRange.Start, "directive requires gfx8+",
2815 IDRange);
2816 if (!isUInt<1>(Val))
2817 return OutOfRangeError(ValRange);
2818 ReserveXNACK = Val;
2819 } else if (ID == ".amdhsa_float_round_mode_32") {
2820 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2821 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
2822 } else if (ID == ".amdhsa_float_round_mode_16_64") {
2823 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2824 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
2825 } else if (ID == ".amdhsa_float_denorm_mode_32") {
2826 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2827 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
2828 } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
2829 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2830 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
2831 ValRange);
2832 } else if (ID == ".amdhsa_dx10_clamp") {
2833 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2834 COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
2835 } else if (ID == ".amdhsa_ieee_mode") {
2836 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
2837 Val, ValRange);
2838 } else if (ID == ".amdhsa_fp16_overflow") {
2839 if (IVersion.Major < 9)
2840 return getParser().Error(IDRange.Start, "directive requires gfx9+",
2841 IDRange);
2842 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
2843 ValRange);
2844 } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
2845 PARSE_BITS_ENTRY(
2846 KD.compute_pgm_rsrc2,
2847 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
2848 ValRange);
2849 } else if (ID == ".amdhsa_exception_fp_denorm_src") {
2850 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2851 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
2852 Val, ValRange);
2853 } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
2854 PARSE_BITS_ENTRY(
2855 KD.compute_pgm_rsrc2,
2856 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
2857 ValRange);
2858 } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
2859 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2860 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
2861 Val, ValRange);
2862 } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
2863 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2864 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
2865 Val, ValRange);
2866 } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
2867 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2868 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
2869 Val, ValRange);
2870 } else if (ID == ".amdhsa_exception_int_div_zero") {
2871 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2872 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
2873 Val, ValRange);
2874 } else {
2875 return getParser().Error(IDRange.Start,
2876 "unknown .amdhsa_kernel directive", IDRange);
2877 }
2878
2879#undef PARSE_BITS_ENTRY
2880 }
2881
2882 if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
2883 return TokError(".amdhsa_next_free_vgpr directive is required");
2884
2885 if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
2886 return TokError(".amdhsa_next_free_sgpr directive is required");
2887
2888 unsigned VGPRBlocks;
2889 unsigned SGPRBlocks;
2890 if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
2891 ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
2892 SGPRRange, VGPRBlocks, SGPRBlocks))
2893 return true;
2894
2895 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
2896 VGPRBlocks))
2897 return OutOfRangeError(VGPRRange);
2898 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
2899 COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);
2900
2901 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
2902 SGPRBlocks))
2903 return OutOfRangeError(SGPRRange);
2904 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
2905 COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2906 SGPRBlocks);
2907
2908 if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
2909 return TokError("too many user SGPRs enabled");
2910 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
2911 UserSGPRCount);
2912
2913 getTargetStreamer().EmitAmdhsaKernelDescriptor(
2914 getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
2915 ReserveFlatScr, ReserveXNACK);
2916 return false;
2917}
2918
Tom Stellard347ac792015-06-26 21:15:07 +00002919bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00002920 uint32_t Major;
2921 uint32_t Minor;
2922
2923 if (ParseDirectiveMajorMinor(Major, Minor))
2924 return true;
2925
2926 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2927 return false;
2928}
2929
/// Parse the .hsa_code_object_isa directive.
/// With no operands, the ISA version is derived from the subtarget's CPU.
/// Otherwise the expected operand list is:
///   major, minor, stepping, "vendor", "arch"
/// \returns true if a parse error was reported, false on success.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  // Major and minor versions are parsed (and diagnosed) by a shared helper.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch names must be quoted string literals.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
2981
Tom Stellardff7416b2015-06-26 21:58:31 +00002982bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2983 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00002984 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
2985 // assembly for backwards compatibility.
2986 if (ID == "max_scratch_backing_memory_byte_size") {
2987 Parser.eatToEndOfStatement();
2988 return false;
2989 }
2990
Valery Pykhtindc110542016-03-06 20:25:36 +00002991 SmallString<40> ErrStr;
2992 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00002993 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002994 return TokError(Err.str());
2995 }
Tom Stellardff7416b2015-06-26 21:58:31 +00002996 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00002997 return false;
2998}
2999
3000bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00003001 amd_kernel_code_t Header;
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003002 AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
Tom Stellardff7416b2015-06-26 21:58:31 +00003003
3004 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00003005 // Lex EndOfStatement. This is in a while loop, because lexing a comment
3006 // will set the current token to EndOfStatement.
3007 while(getLexer().is(AsmToken::EndOfStatement))
3008 Lex();
3009
3010 if (getLexer().isNot(AsmToken::Identifier))
3011 return TokError("expected value identifier or .end_amd_kernel_code_t");
3012
3013 StringRef ID = getLexer().getTok().getIdentifier();
3014 Lex();
3015
3016 if (ID == ".end_amd_kernel_code_t")
3017 break;
3018
3019 if (ParseAMDKernelCodeTValue(ID, Header))
3020 return true;
3021 }
3022
3023 getTargetStreamer().EmitAMDKernelCodeT(Header);
3024
3025 return false;
3026}
3027
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003028bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
3029 if (getLexer().isNot(AsmToken::Identifier))
3030 return TokError("expected symbol name");
3031
3032 StringRef KernelName = Parser.getTok().getString();
3033
3034 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
3035 ELF::STT_AMDGPU_HSA_KERNEL);
3036 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003037 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
3038 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003039 return false;
3040}
3041
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003042bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003043 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
3044 return Error(getParser().getTok().getLoc(),
3045 ".amd_amdgpu_isa directive is not available on non-amdgcn "
3046 "architectures");
3047 }
3048
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003049 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
3050
3051 std::string ISAVersionStringFromSTI;
3052 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
3053 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
3054
3055 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
3056 return Error(getParser().getTok().getLoc(),
3057 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
3058 "arguments specified through the command line");
3059 }
3060
3061 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
3062 Lex();
3063
3064 return false;
3065}
3066
/// Parse the HSA metadata block between HSAMD::AssemblerDirectiveBegin and
/// HSAMD::AssemblerDirectiveEnd, collecting the raw YAML text verbatim and
/// handing it to the target streamer. Only valid when targeting AMDHSA.
/// \returns true if an error was reported, false on success.
bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
                 "not available on non-amdhsa OSes")).str());
  }

  std::string HSAMetadataString;
  raw_string_ostream YamlStream(HSAMetadataString);

  // YAML is whitespace-sensitive, so keep space tokens while scanning.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Preserve leading whitespace (YAML indentation) verbatim.
    while (getLexer().is(AsmToken::Space)) {
      YamlStream << getLexer().getTok().getString();
      Lex();
    }

    // Stop when the closing directive appears at the start of a statement.
    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the rest of the line, terminated with the target's statement
    // separator so the collected text stays line-structured.
    YamlStream << Parser.parseStringToEndOfStatement()
               << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  getLexer().setSkipSpace(true);

  // Reaching EOF without seeing the end directive is an error.
  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(Twine("expected directive ") +
                    Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
  }

  YamlStream.flush();

  if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
    return Error(getParser().getTok().getLoc(), "invalid HSA metadata");

  return false;
}
3115
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003116bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003117 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
3118 return Error(getParser().getTok().getLoc(),
3119 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
3120 "not available on non-amdpal OSes")).str());
3121 }
3122
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003123 PALMD::Metadata PALMetadata;
Tim Renouf72800f02017-10-03 19:03:52 +00003124 for (;;) {
3125 uint32_t Value;
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003126 if (ParseAsAbsoluteExpression(Value)) {
3127 return TokError(Twine("invalid value in ") +
3128 Twine(PALMD::AssemblerDirective));
3129 }
3130 PALMetadata.push_back(Value);
Tim Renouf72800f02017-10-03 19:03:52 +00003131 if (getLexer().isNot(AsmToken::Comma))
3132 break;
3133 Lex();
3134 }
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003135 getTargetStreamer().EmitPALMetadata(PALMetadata);
Tim Renouf72800f02017-10-03 19:03:52 +00003136 return false;
3137}
3138
Tom Stellard45bb48e2015-06-13 03:28:10 +00003139bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00003140 StringRef IDVal = DirectiveID.getString();
3141
Scott Linder1e8c2c72018-06-21 19:38:56 +00003142 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
3143 if (IDVal == ".amdgcn_target")
3144 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00003145
Scott Linder1e8c2c72018-06-21 19:38:56 +00003146 if (IDVal == ".amdhsa_kernel")
3147 return ParseDirectiveAMDHSAKernel();
3148 } else {
3149 if (IDVal == ".hsa_code_object_version")
3150 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00003151
Scott Linder1e8c2c72018-06-21 19:38:56 +00003152 if (IDVal == ".hsa_code_object_isa")
3153 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00003154
Scott Linder1e8c2c72018-06-21 19:38:56 +00003155 if (IDVal == ".amd_kernel_code_t")
3156 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003157
Scott Linder1e8c2c72018-06-21 19:38:56 +00003158 if (IDVal == ".amdgpu_hsa_kernel")
3159 return ParseDirectiveAMDGPUHsaKernel();
3160
3161 if (IDVal == ".amd_amdgpu_isa")
3162 return ParseDirectiveISAVersion();
3163 }
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003164
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003165 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
3166 return ParseDirectiveHSAMetadata();
3167
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003168 if (IDVal == PALMD::AssemblerDirective)
3169 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00003170
Tom Stellard45bb48e2015-06-13 03:28:10 +00003171 return true;
3172}
3173
Matt Arsenault68802d32015-11-05 03:11:27 +00003174bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
3175 unsigned RegNo) const {
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003176
3177 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
3178 R.isValid(); ++R) {
3179 if (*R == RegNo)
3180 return isGFX9();
3181 }
3182
3183 switch (RegNo) {
3184 case AMDGPU::TBA:
3185 case AMDGPU::TBA_LO:
3186 case AMDGPU::TBA_HI:
3187 case AMDGPU::TMA:
3188 case AMDGPU::TMA_LO:
3189 case AMDGPU::TMA_HI:
3190 return !isGFX9();
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00003191 case AMDGPU::XNACK_MASK:
3192 case AMDGPU::XNACK_MASK_LO:
3193 case AMDGPU::XNACK_MASK_HI:
3194 return !isCI() && !isSI() && hasXNACK();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003195 default:
3196 break;
3197 }
3198
Matt Arsenault3b159672015-12-01 20:31:08 +00003199 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00003200 return true;
3201
Matt Arsenault3b159672015-12-01 20:31:08 +00003202 if (isSI()) {
3203 // No flat_scr
3204 switch (RegNo) {
3205 case AMDGPU::FLAT_SCR:
3206 case AMDGPU::FLAT_SCR_LO:
3207 case AMDGPU::FLAT_SCR_HI:
3208 return false;
3209 default:
3210 return true;
3211 }
3212 }
3213
Matt Arsenault68802d32015-11-05 03:11:27 +00003214 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
3215 // SI/CI have.
3216 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
3217 R.isValid(); ++R) {
3218 if (*R == RegNo)
3219 return false;
3220 }
3221
3222 return true;
3223}
3224
/// Parse a single instruction operand. Tries, in order: the tablegen'd custom
/// operand parsers, a register or immediate, a general expression, and
/// finally a bare identifier token (e.g. an instruction flag like 'gds').
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Fall back to a general expression operand (e.g. a symbol reference).
  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3263
Sam Kolton05ef1c92016-06-03 10:27:37 +00003264StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
3265 // Clear any forced encodings from the previous instruction.
3266 setForcedEncodingSize(0);
3267 setForcedDPP(false);
3268 setForcedSDWA(false);
3269
3270 if (Name.endswith("_e64")) {
3271 setForcedEncodingSize(64);
3272 return Name.substr(0, Name.size() - 4);
3273 } else if (Name.endswith("_e32")) {
3274 setForcedEncodingSize(32);
3275 return Name.substr(0, Name.size() - 4);
3276 } else if (Name.endswith("_dpp")) {
3277 setForcedDPP(true);
3278 return Name.substr(0, Name.size() - 4);
3279 } else if (Name.endswith("_sdwa")) {
3280 setForcedSDWA(true);
3281 return Name.substr(0, Name.size() - 5);
3282 }
3283 return Name;
3284}
3285
Tom Stellard45bb48e2015-06-13 03:28:10 +00003286bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
3287 StringRef Name,
3288 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003289 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00003290 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003291 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00003292
Tom Stellard45bb48e2015-06-13 03:28:10 +00003293 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00003294 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003295
3296 // Eat the comma or space if there is one.
3297 if (getLexer().is(AsmToken::Comma))
3298 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00003299
Tom Stellard45bb48e2015-06-13 03:28:10 +00003300 switch (Res) {
3301 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003302 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003303 Error(getLexer().getLoc(), "failed parsing operand.");
3304 while (!getLexer().is(AsmToken::EndOfStatement)) {
3305 Parser.Lex();
3306 }
3307 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003308 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003309 Error(getLexer().getLoc(), "not a valid operand.");
3310 while (!getLexer().is(AsmToken::EndOfStatement)) {
3311 Parser.Lex();
3312 }
3313 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003314 }
3315 }
3316
Tom Stellard45bb48e2015-06-13 03:28:10 +00003317 return false;
3318}
3319
3320//===----------------------------------------------------------------------===//
3321// Utility functions
3322//===----------------------------------------------------------------------===//
3323
Alex Bradbury58eba092016-11-01 16:32:05 +00003324OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00003325AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003326 switch(getLexer().getKind()) {
3327 default: return MatchOperand_NoMatch;
3328 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003329 StringRef Name = Parser.getTok().getString();
3330 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003331 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003332 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003333
3334 Parser.Lex();
3335 if (getLexer().isNot(AsmToken::Colon))
3336 return MatchOperand_ParseFail;
3337
3338 Parser.Lex();
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003339
3340 bool IsMinus = false;
3341 if (getLexer().getKind() == AsmToken::Minus) {
3342 Parser.Lex();
3343 IsMinus = true;
3344 }
3345
Tom Stellard45bb48e2015-06-13 03:28:10 +00003346 if (getLexer().isNot(AsmToken::Integer))
3347 return MatchOperand_ParseFail;
3348
3349 if (getParser().parseAbsoluteExpression(Int))
3350 return MatchOperand_ParseFail;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003351
3352 if (IsMinus)
3353 Int = -Int;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003354 break;
3355 }
3356 }
3357 return MatchOperand_Success;
3358}
3359
Alex Bradbury58eba092016-11-01 16:32:05 +00003360OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003361AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003362 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003363 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003364 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003365 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003366
Alex Bradbury58eba092016-11-01 16:32:05 +00003367 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003368 if (Res != MatchOperand_Success)
3369 return Res;
3370
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003371 if (ConvertResult && !ConvertResult(Value)) {
3372 return MatchOperand_ParseFail;
3373 }
3374
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003375 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003376 return MatchOperand_Success;
3377}
3378
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003379OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
3380 const char *Prefix,
3381 OperandVector &Operands,
3382 AMDGPUOperand::ImmTy ImmTy,
3383 bool (*ConvertResult)(int64_t&)) {
3384 StringRef Name = Parser.getTok().getString();
3385 if (!Name.equals(Prefix))
3386 return MatchOperand_NoMatch;
3387
3388 Parser.Lex();
3389 if (getLexer().isNot(AsmToken::Colon))
3390 return MatchOperand_ParseFail;
3391
3392 Parser.Lex();
3393 if (getLexer().isNot(AsmToken::LBrac))
3394 return MatchOperand_ParseFail;
3395 Parser.Lex();
3396
3397 unsigned Val = 0;
3398 SMLoc S = Parser.getTok().getLoc();
3399
3400 // FIXME: How to verify the number of elements matches the number of src
3401 // operands?
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00003402 for (int I = 0; I < 4; ++I) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003403 if (I != 0) {
3404 if (getLexer().is(AsmToken::RBrac))
3405 break;
3406
3407 if (getLexer().isNot(AsmToken::Comma))
3408 return MatchOperand_ParseFail;
3409 Parser.Lex();
3410 }
3411
3412 if (getLexer().isNot(AsmToken::Integer))
3413 return MatchOperand_ParseFail;
3414
3415 int64_t Op;
3416 if (getParser().parseAbsoluteExpression(Op))
3417 return MatchOperand_ParseFail;
3418
3419 if (Op != 0 && Op != 1)
3420 return MatchOperand_ParseFail;
3421 Val |= (Op << I);
3422 }
3423
3424 Parser.Lex();
3425 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
3426 return MatchOperand_Success;
3427}
3428
Alex Bradbury58eba092016-11-01 16:32:05 +00003429OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003430AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003431 AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003432 int64_t Bit = 0;
3433 SMLoc S = Parser.getTok().getLoc();
3434
3435 // We are at the end of the statement, and this is a default argument, so
3436 // use a default value.
3437 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3438 switch(getLexer().getKind()) {
3439 case AsmToken::Identifier: {
3440 StringRef Tok = Parser.getTok().getString();
3441 if (Tok == Name) {
Ryan Taylor1f334d02018-08-28 15:07:30 +00003442 if (Tok == "r128" && isGFX9())
3443 Error(S, "r128 modifier is not supported on this GPU");
3444 if (Tok == "a16" && !isGFX9())
3445 Error(S, "a16 modifier is not supported on this GPU");
Tom Stellard45bb48e2015-06-13 03:28:10 +00003446 Bit = 1;
3447 Parser.Lex();
3448 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
3449 Bit = 0;
3450 Parser.Lex();
3451 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00003452 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003453 }
3454 break;
3455 }
3456 default:
3457 return MatchOperand_NoMatch;
3458 }
3459 }
3460
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003461 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003462 return MatchOperand_Success;
3463}
3464
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003465static void addOptionalImmOperand(
3466 MCInst& Inst, const OperandVector& Operands,
3467 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3468 AMDGPUOperand::ImmTy ImmT,
3469 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003470 auto i = OptionalIdx.find(ImmT);
3471 if (i != OptionalIdx.end()) {
3472 unsigned Idx = i->second;
3473 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3474 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00003475 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003476 }
3477}
3478
Alex Bradbury58eba092016-11-01 16:32:05 +00003479OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003480AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00003481 if (getLexer().isNot(AsmToken::Identifier)) {
3482 return MatchOperand_NoMatch;
3483 }
3484 StringRef Tok = Parser.getTok().getString();
3485 if (Tok != Prefix) {
3486 return MatchOperand_NoMatch;
3487 }
3488
3489 Parser.Lex();
3490 if (getLexer().isNot(AsmToken::Colon)) {
3491 return MatchOperand_ParseFail;
3492 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003493
Sam Kolton3025e7f2016-04-26 13:33:56 +00003494 Parser.Lex();
3495 if (getLexer().isNot(AsmToken::Identifier)) {
3496 return MatchOperand_ParseFail;
3497 }
3498
3499 Value = Parser.getTok().getString();
3500 return MatchOperand_Success;
3501}
3502
// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Dfmt = 0, Nfmt = 0;
  // dfmt and nfmt can appear in either order, and each is optional.
  bool GotDfmt = false, GotNfmt = false;
  while (!GotDfmt || !GotNfmt) {
    if (!GotDfmt) {
      auto Res = parseIntWithPrefix("dfmt", Dfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // dfmt is a 4-bit field.
        if (Dfmt >= 16) {
          Error(Parser.getTok().getLoc(), "out of range dfmt");
          return MatchOperand_ParseFail;
        }
        GotDfmt = true;
        Parser.Lex();
        continue;
      }
    }
    if (!GotNfmt) {
      auto Res = parseIntWithPrefix("nfmt", Nfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // nfmt is a 3-bit field.
        if (Nfmt >= 8) {
          Error(Parser.getTok().getLoc(), "out of range nfmt");
          return MatchOperand_ParseFail;
        }
        GotNfmt = true;
        Parser.Lex();
        continue;
      }
    }
    // Neither keyword matched this iteration; stop scanning.
    break;
  }
  if (!GotDfmt && !GotNfmt)
    return MatchOperand_NoMatch;
  // Pack both fields into the joint format immediate: dfmt[3:0], nfmt[6:4].
  auto Format = Dfmt | Nfmt << 4;
  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
  return MatchOperand_Success;
}
3549
Tom Stellard45bb48e2015-06-13 03:28:10 +00003550//===----------------------------------------------------------------------===//
3551// ds
3552//===----------------------------------------------------------------------===//
3553
// Convert parsed operands into an MCInst for DS instructions with split
// offset0/offset1 modifiers: register operands are appended in parse order,
// then the optional offset0, offset1 and gds immediates, then implicit m0.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments; remember where each immediate was parsed.
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Optional immediates must be emitted in encoding order even when the
  // assembly omitted them or gave them in another order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
3577
// Convert parsed operands into an MCInst for a generic DS instruction.
// IsGdsHardcoded means the opcode itself implies GDS so no explicit gds
// immediate is emitted; it is also forced when a literal "gds" token was
// parsed.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 reuses the offset field for its swizzle pattern, so its
  // optional immediate was parsed under a distinct ImmTy.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
3612
// Convert parsed operands into an MCInst for an exp instruction. The MCInst
// index of each of the four source slots is recorded so the "en" enable
// mask can be computed afterwards: one bit per live source, or a bit pair
// per live source in compr mode.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned OperandIdx[4]; // MCInst operand index of each of the 4 sources.
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" placeholders become NoRegister source operands.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is handled via instruction selection; nothing to emit here.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4); // exp always parses exactly four source slots.

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // compr mode uses only src0/src1: src1 takes the value parsed into the
    // src2 slot, and the two upper slots are cleared.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      // In compr mode each register carries two components, hence two bits.
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00003673
3674//===----------------------------------------------------------------------===//
3675// s_waitcnt
3676//===----------------------------------------------------------------------===//
3677
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003678static bool
3679encodeCnt(
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003680 const AMDGPU::IsaVersion ISA,
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003681 int64_t &IntVal,
3682 int64_t CntVal,
3683 bool Saturate,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003684 unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
3685 unsigned (*decode)(const IsaVersion &Version, unsigned))
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003686{
3687 bool Failed = false;
3688
3689 IntVal = encode(ISA, IntVal, CntVal);
3690 if (CntVal != decode(ISA, IntVal)) {
3691 if (Saturate) {
3692 IntVal = encode(ISA, IntVal, -1);
3693 } else {
3694 Failed = true;
3695 }
3696 }
3697 return Failed;
3698}
3699
// Parse one "name(value)" term of a s_waitcnt operand and fold it into
// IntVal. Recognized names are vmcnt, expcnt and lgkmcnt; a "_sat" suffix
// clamps an oversized value instead of diagnosing it. Returns true on any
// parse or range failure.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  bool Failed = true; // An unrecognized counter name fails by default.
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  // Terms may be joined by '&' or ','; consume the separator only when it
  // is followed by another counter name.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
3748
// Parse the operand of s_waitcnt: either a raw integer immediate or a
// sequence of named counter terms (see parseCnt). The immediate starts as
// the all-fields-maximal bitmask and each named term overwrites its field.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // One or more counter terms up to the end of the statement.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
3773
// Parse the "hwreg(id[, offset, width])" construct used by s_getreg/s_setreg.
// The register id may be symbolic or a plain integer; offset and width are
// optional but must appear together. Returns true on a syntax error; range
// checking of the parsed values is left to the caller.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    // Pre-gfx9 targets only accept the older part of the symbolic table.
    int Last = ID_SYMBOLIC_LAST_;
    if (isSI() || isCI() || isVI())
      Last = ID_SYMBOLIC_FIRST_GFX9_;
    for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // A closing paren right after the id means no optional params.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
3838
// Parse a complete hwreg operand: either a raw 16-bit immediate or a
// hwreg(...) construct whose fields are range-checked and packed into the
// simm16 encoding here. Range violations are diagnosed but still yield an
// operand so parsing can continue.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unnecessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Pack id, offset and (width - 1) into the simm16 encoding.
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
3881
// A s_waitcnt operand is represented as a plain immediate.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
3885
// A hwreg operand is an immediate tagged with ImmTyHwreg by parseHwreg.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
3889
// Parse the "sendmsg(msg[, op[, stream]])" construct of s_sendmsg.
// Message and operation may each be symbolic or integer; the stream id is
// only accepted for GS messages and is optional. Returns true on a syntax
// error; semantic validation is done by the caller (parseSendMsgOp).
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second adjacent integer invalidates the id --
    // presumably to reject malformed input like "sendmsg(1 2" ; confirm.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS, GS_DONE and SYSMSG take an operation argument.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    // Select the symbolic-name table matching the message family.
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
3985
Matt Arsenault0e8a2992016-12-15 20:40:20 +00003986OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3987 if (getLexer().getKind() != AsmToken::Identifier)
3988 return MatchOperand_NoMatch;
3989
3990 StringRef Str = Parser.getTok().getString();
3991 int Slot = StringSwitch<int>(Str)
3992 .Case("p10", 0)
3993 .Case("p20", 1)
3994 .Case("p0", 2)
3995 .Default(-1);
3996
3997 SMLoc S = Parser.getTok().getLoc();
3998 if (Slot == -1)
3999 return MatchOperand_ParseFail;
4000
4001 Parser.Lex();
4002 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
4003 AMDGPUOperand::ImmTyInterpSlot));
4004 return MatchOperand_Success;
4005}
4006
// Parse an interpolation attribute operand of the form "attrN.c", where N
// is the attribute number (0..63) and c is one of the channels x/y/z/w.
// On success two immediates are pushed: the attribute and the channel.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The last two characters select the channel (".x" etc).
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the "attr" prefix and the channel suffix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    // Diagnose but still return Success so parsing can continue.
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  // Locate the channel inside the original token for diagnostics.
  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
4046
// Report a malformed or out-of-range exp instruction target operand at the
// current token location.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
4050
// Map an exp target name to its hardware encoding:
//   null -> 9, mrtz -> 8, mrt0..7 -> 0..7, pos0..3 -> 12..15,
//   param0..31 -> 32..63, invalid_target_N -> N (always diagnosed).
// Out-of-range values are diagnosed via errorExpTgt() but still return
// Success so the caller can keep parsing.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    // Position targets start at encoding 12.
    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    // Parameter targets start at encoding 32.
    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
4109
4110OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
4111 uint8_t Val;
4112 StringRef Str = Parser.getTok().getString();
4113
4114 auto Res = parseExpTgtImpl(Str, Val);
4115 if (Res != MatchOperand_Success)
4116 return Res;
4117
4118 SMLoc S = Parser.getTok().getLoc();
4119 Parser.Lex();
4120
4121 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
4122 AMDGPUOperand::ImmTyExpTgt));
4123 return MatchOperand_Success;
4124}
4125
// Parse a complete s_sendmsg operand: either a raw 16-bit immediate or a
// sendmsg(...) construct whose message/operation/stream fields are
// validated and packed into the simm16 encoding here. Semantic errors are
// diagnosed but still produce an operand so parsing can continue.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
               || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
4204
// A sendmsg operand is an immediate tagged with ImmTySendMsg.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
4208
Tom Stellard45bb48e2015-06-13 03:28:10 +00004209//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004210// parser helpers
4211//===----------------------------------------------------------------------===//
4212
4213bool
4214AMDGPUAsmParser::trySkipId(const StringRef Id) {
4215 if (getLexer().getKind() == AsmToken::Identifier &&
4216 Parser.getTok().getString() == Id) {
4217 Parser.Lex();
4218 return true;
4219 }
4220 return false;
4221}
4222
4223bool
4224AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
4225 if (getLexer().getKind() == Kind) {
4226 Parser.Lex();
4227 return true;
4228 }
4229 return false;
4230}
4231
4232bool
4233AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
4234 const StringRef ErrMsg) {
4235 if (!trySkipToken(Kind)) {
4236 Error(Parser.getTok().getLoc(), ErrMsg);
4237 return false;
4238 }
4239 return true;
4240}
4241
// Parse an absolute expression into Imm; returns true on success (note the
// inversion of MCAsmParser's false-on-success convention).
bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}
4246
4247bool
4248AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
4249 SMLoc S = Parser.getTok().getLoc();
4250 if (getLexer().getKind() == AsmToken::String) {
4251 Val = Parser.getTok().getStringContents();
4252 Parser.Lex();
4253 return true;
4254 } else {
4255 Error(S, ErrMsg);
4256 return false;
4257 }
4258}
4259
4260//===----------------------------------------------------------------------===//
4261// swizzle
4262//===----------------------------------------------------------------------===//
4263
4264LLVM_READNONE
4265static unsigned
4266encodeBitmaskPerm(const unsigned AndMask,
4267 const unsigned OrMask,
4268 const unsigned XorMask) {
4269 using namespace llvm::AMDGPU::Swizzle;
4270
4271 return BITMASK_PERM_ENC |
4272 (AndMask << BITMASK_AND_SHIFT) |
4273 (OrMask << BITMASK_OR_SHIFT) |
4274 (XorMask << BITMASK_XOR_SHIFT);
4275}
4276
// Parse OpNum comma-prefixed integer operands into Op, verifying that each
// lies in [MinVal, MaxVal]; out-of-range values are diagnosed with ErrMsg.
// Returns true iff all operands parsed and validated.
bool
AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                                      const unsigned MinVal,
                                      const unsigned MaxVal,
                                      const StringRef ErrMsg) {
  for (unsigned i = 0; i < OpNum; ++i) {
    if (!skipToken(AsmToken::Comma, "expected a comma")){
      return false;
    }
    SMLoc ExprLoc = Parser.getTok().getLoc();
    if (!parseExpr(Op[i])) {
      return false;
    }
    if (Op[i] < MinVal || Op[i] > MaxVal) {
      Error(ExprLoc, ErrMsg);
      return false;
    }
  }

  return true;
}
4298
// Parse the operands of swizzle(QUAD_PERM, a, b, c, d): four 2-bit lane
// ids, packed into the quad-perm encoding in Imm.
bool
AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  int64_t Lane[LANE_NUM];
  if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
                           "expected a 2-bit lane id")) {
    Imm = QUAD_PERM_ENC;
    for (auto i = 0; i < LANE_NUM; ++i) {
      Imm |= Lane[i] << (LANE_SHIFT * i);
    }
    return true;
  }
  return false;
}
4314
// Parse the operands of swizzle(BROADCAST, size, lane): a power-of-two
// group size in [2,32] and a lane index within the group. Encoded as a
// bitmask-perm whose AND mask keeps the group bits and OR mask selects the
// broadcast lane.
bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;
  int64_t LaneIdx;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32,
                            "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }
  if (parseSwizzleOperands(1, &LaneIdx,
                           0, GroupSize - 1,
                           "lane id must be in the interval [0,group size - 1]")) {
    Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
    return true;
  }
  return false;
}
4340
// Parse the operand of swizzle(REVERSE, size): a power-of-two group size in
// [2,32]. Encoded as a bitmask-perm XOR with (size - 1), which reverses
// lane order within each group.
bool
AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32, "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
  return true;
}
4360
// Parse the operand of swizzle(SWAP, size): a power-of-two group size in
// [1,16]. Encoded as a bitmask-perm XOR with the size, which swaps
// adjacent groups of that size.
bool
AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            1, 16, "group size must be in the interval [1,16]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
  return true;
}
4380
// Parse the operand of swizzle(BITMASK_PERM, "mask"): a 5-character control
// string, most-significant bit first, where each character is
//   '0' = force bit to 0, '1' = force bit to 1,
//   'p' = preserve lane bit, 'i' = invert lane bit.
// The characters are translated into AND/OR/XOR masks.
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    // First character controls the most significant mask bit.
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
4427
4428bool
4429AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
4430
4431 SMLoc OffsetLoc = Parser.getTok().getLoc();
4432
4433 if (!parseExpr(Imm)) {
4434 return false;
4435 }
4436 if (!isUInt<16>(Imm)) {
4437 Error(OffsetLoc, "expected a 16-bit offset");
4438 return false;
4439 }
4440 return true;
4441}
4442
// Parse the parenthesized body of a "swizzle(MODE, ...)" macro, dispatching
// on the mode identifier. Returns true iff the whole construct (including
// the closing paren) parsed successfully.
bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parentheses")) {

    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
  }

  return false;
}
4471
// Parse the optional "offset:" operand of ds_swizzle_b32. The value is
// either a symbolic swizzle(...) macro or a raw 16-bit offset. When the
// operand is absent, parsing falls through to the generic optional-operand
// parser.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    // Push an operand even on failure so the caller sees a consistent list.
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
4497
// A swizzle operand is an immediate tagged with ImmTySwizzle.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
4502
4503//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004504// sopp branch targets
4505//===----------------------------------------------------------------------===//
4506
// Parse a SOPP branch target: either an absolute integer expression
// (emitted as an immediate) or a bare identifier (emitted as a symbol
// reference expression, to be resolved/fixed up later).
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      // parseAbsoluteExpression consumes the tokens of the expression.
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      // Create (or reuse) a symbol named after the identifier and wrap it
      // in a symbol-ref expression; the token itself is consumed below.
      Operands.push_back(AMDGPUOperand::CreateExpr(this,
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
              Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
4529
4530//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004531// mubuf
4532//===----------------------------------------------------------------------===//
4533
Sam Kolton5f10a132016-05-06 11:31:17 +00004534AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004535 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00004536}
4537
4538AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004539 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00004540}
4541
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004542void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
4543 const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004544 bool IsAtomic,
4545 bool IsAtomicReturn,
4546 bool IsLds) {
4547 bool IsLdsOpcode = IsLds;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004548 bool HasLdsModifier = false;
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004549 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004550 assert(IsAtomicReturn ? IsAtomic : true);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004551
4552 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4553 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4554
4555 // Add the register arguments
4556 if (Op.isReg()) {
4557 Op.addRegOperands(Inst, 1);
4558 continue;
4559 }
4560
4561 // Handle the case where soffset is an immediate
4562 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
4563 Op.addImmOperands(Inst, 1);
4564 continue;
4565 }
4566
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004567 HasLdsModifier = Op.isLDS();
4568
Tom Stellard45bb48e2015-06-13 03:28:10 +00004569 // Handle tokens like 'offen' which are sometimes hard-coded into the
4570 // asm string. There are no MCInst operands for these.
4571 if (Op.isToken()) {
4572 continue;
4573 }
4574 assert(Op.isImm());
4575
4576 // Handle optional arguments
4577 OptionalIdx[Op.getImmTy()] = i;
4578 }
4579
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004580 // This is a workaround for an llvm quirk which may result in an
4581 // incorrect instruction selection. Lds and non-lds versions of
4582 // MUBUF instructions are identical except that lds versions
4583 // have mandatory 'lds' modifier. However this modifier follows
4584 // optional modifiers and llvm asm matcher regards this 'lds'
4585 // modifier as an optional one. As a result, an lds version
4586 // of opcode may be selected even if it has no 'lds' modifier.
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004587 if (IsLdsOpcode && !HasLdsModifier) {
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004588 int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
4589 if (NoLdsOpcode != -1) { // Got lds version - correct it.
4590 Inst.setOpcode(NoLdsOpcode);
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004591 IsLdsOpcode = false;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004592 }
4593 }
4594
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004595 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
4596 if (IsAtomicReturn) {
4597 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
4598 Inst.insert(I, *I);
4599 }
4600
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004601 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004602 if (!IsAtomic) { // glc is hard-coded.
4603 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4604 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004605 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004606
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004607 if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004608 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4609 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00004610}
4611
David Stuttard70e8bc12017-06-22 16:29:22 +00004612void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
4613 OptionalImmIndexMap OptionalIdx;
4614
4615 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4616 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4617
4618 // Add the register arguments
4619 if (Op.isReg()) {
4620 Op.addRegOperands(Inst, 1);
4621 continue;
4622 }
4623
4624 // Handle the case where soffset is an immediate
4625 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
4626 Op.addImmOperands(Inst, 1);
4627 continue;
4628 }
4629
4630 // Handle tokens like 'offen' which are sometimes hard-coded into the
4631 // asm string. There are no MCInst operands for these.
4632 if (Op.isToken()) {
4633 continue;
4634 }
4635 assert(Op.isImm());
4636
4637 // Handle optional arguments
4638 OptionalIdx[Op.getImmTy()] = i;
4639 }
4640
4641 addOptionalImmOperand(Inst, Operands, OptionalIdx,
4642 AMDGPUOperand::ImmTyOffset);
Tim Renouf35484c92018-08-21 11:06:05 +00004643 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
David Stuttard70e8bc12017-06-22 16:29:22 +00004644 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4645 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4646 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4647}
4648
Tom Stellard45bb48e2015-06-13 03:28:10 +00004649//===----------------------------------------------------------------------===//
4650// mimg
4651//===----------------------------------------------------------------------===//
4652
// Convert parsed MIMG operands into an MCInst.  For atomics, the single def
// (vdst) is also re-added as the tied data source.  Optional modifiers are
// collected during the scan and appended afterwards in encoding order.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  // Operand 0 is the mnemonic token; copy the def registers first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      // Remember where each optional modifier was parsed.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  // Append optional modifiers in encoding order; the order of these calls
  // must match the instruction's operand list.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
4692
4693void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton10ac2fd2017-07-07 15:21:52 +00004694 cvtMIMG(Inst, Operands, true);
Sam Kolton1bdcef72016-05-23 09:59:02 +00004695}
4696
Tom Stellard45bb48e2015-06-13 03:28:10 +00004697//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00004698// smrd
4699//===----------------------------------------------------------------------===//
4700
Artem Tamazov54bfd542016-10-31 16:07:39 +00004701bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00004702 return isImm() && isUInt<8>(getImm());
4703}
4704
Artem Tamazov54bfd542016-10-31 16:07:39 +00004705bool AMDGPUOperand::isSMRDOffset20() const {
4706 return isImm() && isUInt<20>(getImm());
4707}
4708
Tom Stellard217361c2015-08-06 19:28:38 +00004709bool AMDGPUOperand::isSMRDLiteralOffset() const {
4710 // 32-bit literals are only supported on CI and we only want to use them
4711 // when the offset is > 8-bits.
4712 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
4713}
4714
Artem Tamazov54bfd542016-10-31 16:07:39 +00004715AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
4716 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4717}
4718
4719AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004720 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00004721}
4722
4723AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004724 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00004725}
4726
Matt Arsenaultfd023142017-06-12 15:55:58 +00004727AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
4728 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4729}
4730
Matt Arsenault9698f1c2017-06-20 19:54:14 +00004731AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
4732 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4733}
4734
Tom Stellard217361c2015-08-06 19:28:38 +00004735//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004736// vop3
4737//===----------------------------------------------------------------------===//
4738
4739static bool ConvertOmodMul(int64_t &Mul) {
4740 if (Mul != 1 && Mul != 2 && Mul != 4)
4741 return false;
4742
4743 Mul >>= 1;
4744 return true;
4745}
4746
4747static bool ConvertOmodDiv(int64_t &Div) {
4748 if (Div == 1) {
4749 Div = 0;
4750 return true;
4751 }
4752
4753 if (Div == 2) {
4754 Div = 3;
4755 return true;
4756 }
4757
4758 return false;
4759}
4760
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004761static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4762 if (BoundCtrl == 0) {
4763 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004764 return true;
Matt Arsenault12c53892016-11-15 19:58:54 +00004765 }
4766
4767 if (BoundCtrl == -1) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004768 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004769 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004770 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004771
Tom Stellard45bb48e2015-06-13 03:28:10 +00004772 return false;
4773}
4774
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004775// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00004776static const OptionalOperand AMDGPUOptionalOperandTable[] = {
4777 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
4778 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
4779 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
4780 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
4781 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
4782 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004783 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004784 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00004785 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
Tim Renouf35484c92018-08-21 11:06:05 +00004786 {"dfmt", AMDGPUOperand::ImmTyFORMAT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004787 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
4788 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
4789 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00004790 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00004791 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004792 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
4793 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
4794 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
4795 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
Ryan Taylor1f334d02018-08-28 15:07:30 +00004796 {"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
4797 {"a16", AMDGPUOperand::ImmTyR128A16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004798 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00004799 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004800 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
4801 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
4802 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
4803 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00004804 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
4805 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
4806 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004807 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004808 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004809 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004810 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
4811 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
4812 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
4813 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004814};
Tom Stellard45bb48e2015-06-13 03:28:10 +00004815
Alex Bradbury58eba092016-11-01 16:32:05 +00004816OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00004817 unsigned size = Operands.size();
4818 assert(size > 0);
4819
4820 OperandMatchResultTy res = parseOptionalOpr(Operands);
4821
4822 // This is a hack to enable hardcoded mandatory operands which follow
4823 // optional operands.
4824 //
4825 // Current design assumes that all operands after the first optional operand
4826 // are also optional. However implementation of some instructions violates
4827 // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
4828 //
4829 // To alleviate this problem, we have to (implicitly) parse extra operands
4830 // to make sure autogenerated parser of custom operands never hit hardcoded
4831 // mandatory operands.
4832
4833 if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
4834
4835 // We have parsed the first optional operand.
4836 // Parse as many operands as necessary to skip all mandatory operands.
4837
4838 for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
4839 if (res != MatchOperand_Success ||
4840 getLexer().is(AsmToken::EndOfStatement)) break;
4841 if (getLexer().is(AsmToken::Comma)) Parser.Lex();
4842 res = parseOptionalOpr(Operands);
4843 }
4844 }
4845
4846 return res;
4847}
4848
// Try to parse exactly one optional operand at the current position by
// probing every entry of AMDGPUOptionalOperandTable in table order and
// dispatching to the specialized parser for that operand kind.  Returns
// the first result that is not MatchOperand_NoMatch (success or failure),
// or MatchOperand_NoMatch if nothing matched.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      // Bare flag operand, e.g. 'glc' / 'slc'.
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      // Array-style operands, e.g. op_sel:[0,1].
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT) {
      res = parseDfmtNfmt(Operands);
    } else {
      // Generic name:value operand.
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
4880
Matt Arsenault12c53892016-11-15 19:58:54 +00004881OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004882 StringRef Name = Parser.getTok().getString();
4883 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00004884 return parseIntWithPrefix("mul", Operands,
4885 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004886 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004887
4888 if (Name == "div") {
4889 return parseIntWithPrefix("div", Operands,
4890 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4891 }
4892
4893 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004894}
4895
// Convert a VOP3 instruction whose op_sel operand also carries a destination
// select bit.  The bit immediately after the last source (bit SrcNum) is
// transferred into the DST_OP_SEL bit of the src0_modifiers operand, which
// is where the encoder expects it — presumably because there is no separate
// dst_modifiers operand (NOTE(review): confirm against the encoder).
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  // Count how many of src0..src2 this opcode actually has.
  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  // Bit SrcNum of op_sel is the destination select.
  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}
4919
Sam Koltona3ec5c12016-10-07 14:46:06 +00004920static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4921 // 1. This operand is input modifiers
4922 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4923 // 2. This is not last operand
4924 && Desc.NumOperands > (OpNum + 1)
4925 // 3. Next operand is register class
4926 && Desc.OpInfo[OpNum + 1].RegClass != -1
4927 // 4. Next register is not tied to any other operand
4928 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4929}
4930
// Convert a parsed VOP3 interpolation instruction into an MCInst.  The
// interp slot/attribute/channel operands are encoded as raw immediates;
// the trailing 'high', 'clamp' and 'omod' modifiers are optional and only
// appended when the opcode has the corresponding named operand.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  // Operand 0 is the mnemonic token; copy the def registers first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with FP input modifiers: emits modifier + value operands.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      // Interp-specific operands are plain immediates.
      Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}
4969
// Convert a parsed VOP3 instruction into an MCInst.  Records optional
// modifier positions in OptionalIdx (shared with callers such as cvtVOP3P),
// then appends clamp/omod when present.  Also fixes up v_mac/v_fmac whose
// tied src2 has no parsed modifiers operand.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  // Operand 0 is the mnemonic token; copy the def registers first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        // Emits both the modifiers operand and the source itself.
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // Special case v_mac_{f16, f32} and v_fmac_f32 (gfx906):
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0.
  if (Opc == AMDGPU::V_MAC_F32_e64_si ||
      Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi ||
      Opc == AMDGPU::V_FMAC_F32_e64_vi) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
5029
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005030void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005031 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005032 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005033}
5034
// Convert a parsed VOP3P (packed) instruction into an MCInst, then fold the
// packed-math modifier operands (op_sel, op_sel_hi, neg_lo, neg_hi) into the
// per-source bits of the src*_modifiers operands, which is how the encoding
// actually represents them.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  // Opcodes with a vdst_in operand get the dst duplicated as the tied input;
  // only non-packed opcodes have it here (enforced by the assert).
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    // Packed ops default op_sel_hi to all-ones (-1); scalar ones to 0.
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Scatter bit J of each packed modifier into source J's modifiers operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    // OR into whatever cvtVOP3 already put there (e.g. abs/neg bits).
    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
5117
Sam Koltondfa29f72016-03-09 12:29:31 +00005118//===----------------------------------------------------------------------===//
5119// dpp
5120//===----------------------------------------------------------------------===//
5121
5122bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005123 using namespace AMDGPU::DPP;
5124
Sam Koltondfa29f72016-03-09 12:29:31 +00005125 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
5126 if (result) {
5127 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005128 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
5129 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
5130 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
5131 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
5132 (Imm == DppCtrl::WAVE_SHL1) ||
5133 (Imm == DppCtrl::WAVE_ROL1) ||
5134 (Imm == DppCtrl::WAVE_SHR1) ||
5135 (Imm == DppCtrl::WAVE_ROR1) ||
5136 (Imm == DppCtrl::ROW_MIRROR) ||
5137 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
5138 (Imm == DppCtrl::BCAST15) ||
5139 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00005140 }
5141 return false;
5142}
5143
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00005144bool AMDGPUOperand::isGPRIdxMode() const {
5145 return isImm() && isUInt<4>(getImm());
5146}
5147
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00005148bool AMDGPUOperand::isS16Imm() const {
5149 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
5150}
5151
5152bool AMDGPUOperand::isU16Imm() const {
5153 return isImm() && isUInt<16>(getImm());
5154}
5155
// Parse the dpp_ctrl operand of a DPP instruction. Accepts either
//   row_mirror / row_half_mirror          (bare keywords)
//   quad_perm:[a,b,c,d]                   (four 2-bit lane selects)
//   row_shl:/row_shr:/row_ror:n           (n in 1..15)
//   wave_shl:/wave_rol:/wave_shr:/wave_ror:1
//   row_bcast:15 or row_bcast:31
// and pushes a single ImmTyDppCtrl immediate holding the hardware encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  using namespace AMDGPU::DPP;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  // The operand must start with an identifier token; anything else is simply
  // not a dpp_ctrl and other parsers get a chance (NoMatch, not ParseFail).
  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = DppCtrl::ROW_MIRROR;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = DppCtrl::ROW_HALF_MIRROR;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens:
    // only the known "prefix:value" forms may be consumed below.
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    // All remaining forms require a ':' after the prefix.
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane select occupies bits [1:0] of the encoding.
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three lane selects go into bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();
    } else {
      // sel:%d — a single integer whose valid range depends on the prefix.
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Row shifts/rotates fold the 1..15 amount into the encoding base;
      // wave ops only accept 1; row_bcast only accepts 15 or 31.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHL0;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHR0;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_ROR0;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = DppCtrl::WAVE_SHL1;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = DppCtrl::WAVE_ROL1;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = DppCtrl::WAVE_SHR1;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = DppCtrl::WAVE_ROR1;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = DppCtrl::BCAST15;
        } else if (Int == 31) {
          Int = DppCtrl::BCAST31;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
5256
Sam Kolton5f10a132016-05-06 11:31:17 +00005257AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005258 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005259}
5260
Sam Kolton5f10a132016-05-06 11:31:17 +00005261AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005262 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005263}
5264
Sam Kolton5f10a132016-05-06 11:31:17 +00005265AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005266 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00005267}
5268
5269void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00005270 OptionalImmIndexMap OptionalIdx;
5271
5272 unsigned I = 1;
5273 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5274 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5275 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5276 }
5277
5278 for (unsigned E = Operands.size(); I != E; ++I) {
Valery Pykhtin3d9afa22018-11-30 14:21:56 +00005279 auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
5280 MCOI::TIED_TO);
5281 if (TiedTo != -1) {
5282 assert((unsigned)TiedTo < Inst.getNumOperands());
5283 // handle tied old or src2 for MAC instructions
5284 Inst.addOperand(Inst.getOperand(TiedTo));
5285 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005286 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5287 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00005288 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00005289 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00005290 // Skip it.
5291 continue;
5292 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00005293 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00005294 } else if (Op.isDPPCtrl()) {
5295 Op.addImmOperands(Inst, 1);
5296 } else if (Op.isImm()) {
5297 // Handle optional arguments
5298 OptionalIdx[Op.getImmTy()] = I;
5299 } else {
5300 llvm_unreachable("Invalid operand type");
5301 }
5302 }
5303
Sam Koltondfa29f72016-03-09 12:29:31 +00005304 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
5305 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
5306 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
5307}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00005308
Sam Kolton3025e7f2016-04-26 13:33:56 +00005309//===----------------------------------------------------------------------===//
5310// sdwa
5311//===----------------------------------------------------------------------===//
5312
Alex Bradbury58eba092016-11-01 16:32:05 +00005313OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00005314AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
5315 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005316 using namespace llvm::AMDGPU::SDWA;
5317
Sam Kolton3025e7f2016-04-26 13:33:56 +00005318 SMLoc S = Parser.getTok().getLoc();
5319 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005320 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00005321
Sam Kolton05ef1c92016-06-03 10:27:37 +00005322 res = parseStringWithPrefix(Prefix, Value);
5323 if (res != MatchOperand_Success) {
5324 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005325 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005326
Sam Kolton3025e7f2016-04-26 13:33:56 +00005327 int64_t Int;
5328 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005329 .Case("BYTE_0", SdwaSel::BYTE_0)
5330 .Case("BYTE_1", SdwaSel::BYTE_1)
5331 .Case("BYTE_2", SdwaSel::BYTE_2)
5332 .Case("BYTE_3", SdwaSel::BYTE_3)
5333 .Case("WORD_0", SdwaSel::WORD_0)
5334 .Case("WORD_1", SdwaSel::WORD_1)
5335 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005336 .Default(0xffffffff);
5337 Parser.Lex(); // eat last token
5338
5339 if (Int == 0xffffffff) {
5340 return MatchOperand_ParseFail;
5341 }
5342
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005343 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005344 return MatchOperand_Success;
5345}
5346
Alex Bradbury58eba092016-11-01 16:32:05 +00005347OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00005348AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005349 using namespace llvm::AMDGPU::SDWA;
5350
Sam Kolton3025e7f2016-04-26 13:33:56 +00005351 SMLoc S = Parser.getTok().getLoc();
5352 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005353 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005354
5355 res = parseStringWithPrefix("dst_unused", Value);
5356 if (res != MatchOperand_Success) {
5357 return res;
5358 }
5359
5360 int64_t Int;
5361 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005362 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
5363 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
5364 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005365 .Default(0xffffffff);
5366 Parser.Lex(); // eat last token
5367
5368 if (Int == 0xffffffff) {
5369 return MatchOperand_ParseFail;
5370 }
5371
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005372 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005373 return MatchOperand_Success;
5374}
5375
// Convert parsed operands of a VOP1 SDWA instruction into an MCInst.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
5379
// Convert parsed operands of a VOP2 SDWA instruction into an MCInst.
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
5383
// Convert parsed operands of a VOP2b SDWA instruction into an MCInst.
// Passes skipVcc=true so the explicit "vcc" token is dropped in cvtSDWA.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
5387
// Convert parsed operands of a VOPC SDWA instruction into an MCInst.
// The explicit "vcc" destination token is skipped only on VI.
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
5391
// Shared worker for all cvtSdwa* converters: places parsed operands into the
// MCInst, optionally skipping explicit "vcc" tokens, then appends defaulted
// SDWA controls (clamp/omod/dst_sel/dst_unused/src[01]_sel) in the operand
// order required for the given basic instruction class.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  // Operands[0] is the mnemonic; add the destination register(s) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    // Reset so only a directly following "vcc" can be treated as tied dst.
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_vi/gfx9 has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // omod exists only on some opcodes; add it only when the instruction
      // actually has the operand.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      // VOPC has no dst_sel/dst_unused (destination is vcc), only clamp and
      // the two source selects.
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00005479
/// Force static initialization.
/// Registers this asm parser for both AMDGPU targets returned by
/// getTheAMDGPUTarget() and getTheGCNTarget().
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
5485
5486#define GET_REGISTER_MATCHER
5487#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00005488#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00005489#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00005490
// This function should be defined after the auto-generated include so that we
// have the MatchClassKind enum defined.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
                                                     unsigned Kind) {
  // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
  // But MatchInstructionImpl() expects a token and fails to validate the
  // operand. This method checks if we were given an immediate operand but the
  // matcher expects the corresponding token, and accepts it when it fits.
  AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
  switch (Kind) {
  case MCK_addr64:
    return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
  case MCK_gds:
    return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
  case MCK_lds:
    return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
  case MCK_glc:
    return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
  case MCK_idxen:
    return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
  case MCK_offen:
    return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcB32:
    // When operands have expression values, they will return true for isToken,
    // because it is not possible to distinguish between a token and an
    // expression at parse time. MatchInstructionImpl() will always try to
    // match an operand as a token, when isToken returns true, and when the
    // name of the expression is not a valid token, the match will fail,
    // so we need to handle it here.
    return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcF32:
    return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
  case MCK_SoppBrTarget:
    return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
  case MCK_VReg32OrOff:
    return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
  case MCK_InterpSlot:
    return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
  case MCK_Attr:
    return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
  case MCK_AttrChan:
    return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
  default:
    return Match_InvalidOperand;
  }
}