blob: 57c552da19f951be60109c5176b5fef3d2a5ddee [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000010#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000011#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000012#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000013#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000014#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000015#include "SIInstrInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000016#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000018#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000020#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000021#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000022#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000023#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000025#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000026#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000028#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000029#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000033#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000037#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000039#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000040#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000043#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000044#include "llvm/Support/AMDGPUMetadata.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000045#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000046#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000047#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000048#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000049#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000050#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000052#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000053#include <algorithm>
54#include <cassert>
55#include <cstdint>
56#include <cstring>
57#include <iterator>
58#include <map>
59#include <memory>
60#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000061
Tom Stellard45bb48e2015-06-13 03:28:10 +000062using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000063using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000064
65namespace {
66
Sam Kolton1eeb11b2016-09-09 14:44:04 +000067class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000068
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000069enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
70
Sam Kolton1eeb11b2016-09-09 14:44:04 +000071//===----------------------------------------------------------------------===//
72// Operand
73//===----------------------------------------------------------------------===//
74
Tom Stellard45bb48e2015-06-13 03:28:10 +000075class AMDGPUOperand : public MCParsedAsmOperand {
76 enum KindTy {
77 Token,
78 Immediate,
79 Register,
80 Expression
81 } Kind;
82
83 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000084 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000085
86public:
Matt Arsenaultf15da6c2017-02-03 20:49:51 +000087 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
Sam Kolton1eeb11b2016-09-09 14:44:04 +000088 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000089
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000090 using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000091
Sam Kolton945231a2016-06-10 09:57:59 +000092 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000093 bool Abs = false;
94 bool Neg = false;
95 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000096
97 bool hasFPModifiers() const { return Abs || Neg; }
98 bool hasIntModifiers() const { return Sext; }
99 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
100
101 int64_t getFPModifiersOperand() const {
102 int64_t Operand = 0;
103 Operand |= Abs ? SISrcMods::ABS : 0;
104 Operand |= Neg ? SISrcMods::NEG : 0;
105 return Operand;
106 }
107
108 int64_t getIntModifiersOperand() const {
109 int64_t Operand = 0;
110 Operand |= Sext ? SISrcMods::SEXT : 0;
111 return Operand;
112 }
113
114 int64_t getModifiersOperand() const {
115 assert(!(hasFPModifiers() && hasIntModifiers())
116 && "fp and int modifiers should not be used simultaneously");
117 if (hasFPModifiers()) {
118 return getFPModifiersOperand();
119 } else if (hasIntModifiers()) {
120 return getIntModifiersOperand();
121 } else {
122 return 0;
123 }
124 }
125
126 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
127 };
128
Tom Stellard45bb48e2015-06-13 03:28:10 +0000129 enum ImmTy {
130 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 ImmTyGDS,
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000132 ImmTyLDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000133 ImmTyOffen,
134 ImmTyIdxen,
135 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000136 ImmTyOffset,
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000137 ImmTyInstOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000138 ImmTyOffset0,
139 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000140 ImmTyGLC,
141 ImmTySLC,
142 ImmTyTFE,
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000143 ImmTyD16,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000144 ImmTyClampSI,
145 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000146 ImmTyDppCtrl,
147 ImmTyDppRowMask,
148 ImmTyDppBankMask,
149 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000150 ImmTySdwaDstSel,
151 ImmTySdwaSrc0Sel,
152 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000153 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000154 ImmTyDMask,
155 ImmTyUNorm,
156 ImmTyDA,
157 ImmTyR128,
158 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000159 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000160 ImmTyExpCompr,
161 ImmTyExpVM,
David Stuttard70e8bc12017-06-22 16:29:22 +0000162 ImmTyDFMT,
163 ImmTyNFMT,
Artem Tamazovd6468662016-04-25 14:13:51 +0000164 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000165 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000166 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000167 ImmTyInterpSlot,
168 ImmTyInterpAttr,
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000169 ImmTyAttrChan,
170 ImmTyOpSel,
171 ImmTyOpSelHi,
172 ImmTyNegLo,
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000173 ImmTyNegHi,
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000174 ImmTySwizzle,
175 ImmTyHigh
Tom Stellard45bb48e2015-06-13 03:28:10 +0000176 };
177
178 struct TokOp {
179 const char *Data;
180 unsigned Length;
181 };
182
183 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000184 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000185 ImmTy Type;
186 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000187 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000188 };
189
190 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000191 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000192 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000193 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000194 };
195
196 union {
197 TokOp Tok;
198 ImmOp Imm;
199 RegOp Reg;
200 const MCExpr *Expr;
201 };
202
Tom Stellard45bb48e2015-06-13 03:28:10 +0000203 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000204 if (Kind == Token)
205 return true;
206
207 if (Kind != Expression || !Expr)
208 return false;
209
210 // When parsing operands, we can't always tell if something was meant to be
211 // a token, like 'gds', or an expression that references a global variable.
212 // In this case, we assume the string is an expression, and if we need to
213 // interpret is a token, then we treat the symbol name as the token.
214 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000215 }
216
217 bool isImm() const override {
218 return Kind == Immediate;
219 }
220
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000221 bool isInlinableImm(MVT type) const;
222 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000223
Tom Stellard45bb48e2015-06-13 03:28:10 +0000224 bool isRegKind() const {
225 return Kind == Register;
226 }
227
228 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000229 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000230 }
231
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000232 bool isRegOrImmWithInputMods(MVT type) const {
233 return isRegKind() || isInlinableImm(type);
234 }
235
Matt Arsenault4bd72362016-12-10 00:39:12 +0000236 bool isRegOrImmWithInt16InputMods() const {
237 return isRegOrImmWithInputMods(MVT::i16);
238 }
239
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000240 bool isRegOrImmWithInt32InputMods() const {
241 return isRegOrImmWithInputMods(MVT::i32);
242 }
243
244 bool isRegOrImmWithInt64InputMods() const {
245 return isRegOrImmWithInputMods(MVT::i64);
246 }
247
Matt Arsenault4bd72362016-12-10 00:39:12 +0000248 bool isRegOrImmWithFP16InputMods() const {
249 return isRegOrImmWithInputMods(MVT::f16);
250 }
251
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000252 bool isRegOrImmWithFP32InputMods() const {
253 return isRegOrImmWithInputMods(MVT::f32);
254 }
255
256 bool isRegOrImmWithFP64InputMods() const {
257 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000258 }
259
Sam Kolton9772eb32017-01-11 11:46:30 +0000260 bool isVReg() const {
261 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
262 isRegClass(AMDGPU::VReg_64RegClassID) ||
263 isRegClass(AMDGPU::VReg_96RegClassID) ||
264 isRegClass(AMDGPU::VReg_128RegClassID) ||
265 isRegClass(AMDGPU::VReg_256RegClassID) ||
266 isRegClass(AMDGPU::VReg_512RegClassID);
267 }
268
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000269 bool isVReg32OrOff() const {
270 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
271 }
272
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +0000273 bool isSDWAOperand(MVT type) const;
274 bool isSDWAFP16Operand() const;
275 bool isSDWAFP32Operand() const;
276 bool isSDWAInt16Operand() const;
277 bool isSDWAInt32Operand() const;
Sam Kolton549c89d2017-06-21 08:53:38 +0000278
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000279 bool isImmTy(ImmTy ImmT) const {
280 return isImm() && Imm.Type == ImmT;
281 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000282
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000283 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000284 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000285 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000286
Sam Kolton945231a2016-06-10 09:57:59 +0000287 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
288 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
289 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000290 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
291 bool isDA() const { return isImmTy(ImmTyDA); }
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000292 bool isR128() const { return isImmTy(ImmTyR128); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000293 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000294 bool isOff() const { return isImmTy(ImmTyOff); }
295 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000296 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
297 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000298 bool isOffen() const { return isImmTy(ImmTyOffen); }
299 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
300 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
301 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
302 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
303 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Matt Arsenaultfd023142017-06-12 15:55:58 +0000304
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000305 bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
306 bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000307 bool isGDS() const { return isImmTy(ImmTyGDS); }
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000308 bool isLDS() const { return isImmTy(ImmTyLDS); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000309 bool isGLC() const { return isImmTy(ImmTyGLC); }
310 bool isSLC() const { return isImmTy(ImmTySLC); }
311 bool isTFE() const { return isImmTy(ImmTyTFE); }
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000312 bool isD16() const { return isImmTy(ImmTyD16); }
David Stuttard70e8bc12017-06-22 16:29:22 +0000313 bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
314 bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
Sam Kolton945231a2016-06-10 09:57:59 +0000315 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
316 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
317 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
318 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
319 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
320 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
321 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000322 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
323 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
324 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000325 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
326 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
327 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
328 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000329 bool isHigh() const { return isImmTy(ImmTyHigh); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000330
Sam Kolton945231a2016-06-10 09:57:59 +0000331 bool isMod() const {
332 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000333 }
334
335 bool isRegOrImm() const {
336 return isReg() || isImm();
337 }
338
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000339 bool isRegClass(unsigned RCID) const;
340
Sam Kolton9772eb32017-01-11 11:46:30 +0000341 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
342 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
343 }
344
Matt Arsenault4bd72362016-12-10 00:39:12 +0000345 bool isSCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000346 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000347 }
348
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000349 bool isSCSrcV2B16() const {
350 return isSCSrcB16();
351 }
352
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000353 bool isSCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000354 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000355 }
356
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000357 bool isSCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000358 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000359 }
360
Matt Arsenault4bd72362016-12-10 00:39:12 +0000361 bool isSCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000362 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000363 }
364
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000365 bool isSCSrcV2F16() const {
366 return isSCSrcF16();
367 }
368
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000369 bool isSCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000370 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000371 }
372
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000373 bool isSCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000374 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000375 }
376
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000377 bool isSSrcB32() const {
378 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
379 }
380
Matt Arsenault4bd72362016-12-10 00:39:12 +0000381 bool isSSrcB16() const {
382 return isSCSrcB16() || isLiteralImm(MVT::i16);
383 }
384
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000385 bool isSSrcV2B16() const {
386 llvm_unreachable("cannot happen");
387 return isSSrcB16();
388 }
389
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000390 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000391 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
392 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000393 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000394 }
395
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000396 bool isSSrcF32() const {
397 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000398 }
399
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000400 bool isSSrcF64() const {
401 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000402 }
403
Matt Arsenault4bd72362016-12-10 00:39:12 +0000404 bool isSSrcF16() const {
405 return isSCSrcB16() || isLiteralImm(MVT::f16);
406 }
407
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000408 bool isSSrcV2F16() const {
409 llvm_unreachable("cannot happen");
410 return isSSrcF16();
411 }
412
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000413 bool isVCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000414 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000415 }
416
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000417 bool isVCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000418 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000419 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000420
Matt Arsenault4bd72362016-12-10 00:39:12 +0000421 bool isVCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000422 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000423 }
424
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000425 bool isVCSrcV2B16() const {
426 return isVCSrcB16();
427 }
428
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000429 bool isVCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000430 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000431 }
432
433 bool isVCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000434 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000435 }
436
Matt Arsenault4bd72362016-12-10 00:39:12 +0000437 bool isVCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000438 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000439 }
440
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000441 bool isVCSrcV2F16() const {
442 return isVCSrcF16();
443 }
444
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000445 bool isVSrcB32() const {
Dmitry Preobrazhensky32c6b5c2018-06-13 17:02:03 +0000446 return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000447 }
448
449 bool isVSrcB64() const {
450 return isVCSrcF64() || isLiteralImm(MVT::i64);
451 }
452
Matt Arsenault4bd72362016-12-10 00:39:12 +0000453 bool isVSrcB16() const {
454 return isVCSrcF16() || isLiteralImm(MVT::i16);
455 }
456
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000457 bool isVSrcV2B16() const {
458 llvm_unreachable("cannot happen");
459 return isVSrcB16();
460 }
461
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000462 bool isVSrcF32() const {
Dmitry Preobrazhensky32c6b5c2018-06-13 17:02:03 +0000463 return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000464 }
465
466 bool isVSrcF64() const {
467 return isVCSrcF64() || isLiteralImm(MVT::f64);
468 }
469
Matt Arsenault4bd72362016-12-10 00:39:12 +0000470 bool isVSrcF16() const {
471 return isVCSrcF16() || isLiteralImm(MVT::f16);
472 }
473
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000474 bool isVSrcV2F16() const {
475 llvm_unreachable("cannot happen");
476 return isVSrcF16();
477 }
478
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000479 bool isKImmFP32() const {
480 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000481 }
482
Matt Arsenault4bd72362016-12-10 00:39:12 +0000483 bool isKImmFP16() const {
484 return isLiteralImm(MVT::f16);
485 }
486
Tom Stellard45bb48e2015-06-13 03:28:10 +0000487 bool isMem() const override {
488 return false;
489 }
490
491 bool isExpr() const {
492 return Kind == Expression;
493 }
494
495 bool isSoppBrTarget() const {
496 return isExpr() || isImm();
497 }
498
Sam Kolton945231a2016-06-10 09:57:59 +0000499 bool isSWaitCnt() const;
500 bool isHwreg() const;
501 bool isSendMsg() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000502 bool isSwizzle() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000503 bool isSMRDOffset8() const;
504 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000505 bool isSMRDLiteralOffset() const;
506 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000507 bool isGPRIdxMode() const;
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +0000508 bool isS16Imm() const;
509 bool isU16Imm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000510
Tom Stellard89049702016-06-15 02:54:14 +0000511 StringRef getExpressionAsToken() const {
512 assert(isExpr());
513 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
514 return S->getSymbol().getName();
515 }
516
Sam Kolton945231a2016-06-10 09:57:59 +0000517 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000518 assert(isToken());
519
520 if (Kind == Expression)
521 return getExpressionAsToken();
522
Sam Kolton945231a2016-06-10 09:57:59 +0000523 return StringRef(Tok.Data, Tok.Length);
524 }
525
526 int64_t getImm() const {
527 assert(isImm());
528 return Imm.Val;
529 }
530
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000531 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000532 assert(isImm());
533 return Imm.Type;
534 }
535
536 unsigned getReg() const override {
537 return Reg.RegNo;
538 }
539
Tom Stellard45bb48e2015-06-13 03:28:10 +0000540 SMLoc getStartLoc() const override {
541 return StartLoc;
542 }
543
Peter Collingbourne0da86302016-10-10 22:49:37 +0000544 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000545 return EndLoc;
546 }
547
Matt Arsenaultf7f59b52017-12-20 18:52:57 +0000548 SMRange getLocRange() const {
549 return SMRange(StartLoc, EndLoc);
550 }
551
Sam Kolton945231a2016-06-10 09:57:59 +0000552 Modifiers getModifiers() const {
553 assert(isRegKind() || isImmTy(ImmTyNone));
554 return isRegKind() ? Reg.Mods : Imm.Mods;
555 }
556
557 void setModifiers(Modifiers Mods) {
558 assert(isRegKind() || isImmTy(ImmTyNone));
559 if (isRegKind())
560 Reg.Mods = Mods;
561 else
562 Imm.Mods = Mods;
563 }
564
565 bool hasModifiers() const {
566 return getModifiers().hasModifiers();
567 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000568
Sam Kolton945231a2016-06-10 09:57:59 +0000569 bool hasFPModifiers() const {
570 return getModifiers().hasFPModifiers();
571 }
572
573 bool hasIntModifiers() const {
574 return getModifiers().hasIntModifiers();
575 }
576
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000577 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
578
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000579 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000580
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000581 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000582
Matt Arsenault4bd72362016-12-10 00:39:12 +0000583 template <unsigned Bitwidth>
584 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
585
586 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
587 addKImmFPOperands<16>(Inst, N);
588 }
589
590 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
591 addKImmFPOperands<32>(Inst, N);
592 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000593
594 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000595
596 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
597 if (isRegKind())
598 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000599 else if (isExpr())
600 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000601 else
602 addImmOperands(Inst, N);
603 }
604
605 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
606 Modifiers Mods = getModifiers();
607 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
608 if (isRegKind()) {
609 addRegOperands(Inst, N);
610 } else {
611 addImmOperands(Inst, N, false);
612 }
613 }
614
615 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
616 assert(!hasIntModifiers());
617 addRegOrImmWithInputModsOperands(Inst, N);
618 }
619
620 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
621 assert(!hasFPModifiers());
622 addRegOrImmWithInputModsOperands(Inst, N);
623 }
624
Sam Kolton9772eb32017-01-11 11:46:30 +0000625 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
626 Modifiers Mods = getModifiers();
627 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
628 assert(isRegKind());
629 addRegOperands(Inst, N);
630 }
631
632 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
633 assert(!hasIntModifiers());
634 addRegWithInputModsOperands(Inst, N);
635 }
636
637 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
638 assert(!hasFPModifiers());
639 addRegWithInputModsOperands(Inst, N);
640 }
641
Sam Kolton945231a2016-06-10 09:57:59 +0000642 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
643 if (isImm())
644 addImmOperands(Inst, N);
645 else {
646 assert(isExpr());
647 Inst.addOperand(MCOperand::createExpr(Expr));
648 }
649 }
650
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000651 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000652 switch (Type) {
653 case ImmTyNone: OS << "None"; break;
654 case ImmTyGDS: OS << "GDS"; break;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000655 case ImmTyLDS: OS << "LDS"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000656 case ImmTyOffen: OS << "Offen"; break;
657 case ImmTyIdxen: OS << "Idxen"; break;
658 case ImmTyAddr64: OS << "Addr64"; break;
659 case ImmTyOffset: OS << "Offset"; break;
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000660 case ImmTyInstOffset: OS << "InstOffset"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000661 case ImmTyOffset0: OS << "Offset0"; break;
662 case ImmTyOffset1: OS << "Offset1"; break;
663 case ImmTyGLC: OS << "GLC"; break;
664 case ImmTySLC: OS << "SLC"; break;
665 case ImmTyTFE: OS << "TFE"; break;
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000666 case ImmTyD16: OS << "D16"; break;
David Stuttard70e8bc12017-06-22 16:29:22 +0000667 case ImmTyDFMT: OS << "DFMT"; break;
668 case ImmTyNFMT: OS << "NFMT"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000669 case ImmTyClampSI: OS << "ClampSI"; break;
670 case ImmTyOModSI: OS << "OModSI"; break;
671 case ImmTyDppCtrl: OS << "DppCtrl"; break;
672 case ImmTyDppRowMask: OS << "DppRowMask"; break;
673 case ImmTyDppBankMask: OS << "DppBankMask"; break;
674 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000675 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
676 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
677 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000678 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
679 case ImmTyDMask: OS << "DMask"; break;
680 case ImmTyUNorm: OS << "UNorm"; break;
681 case ImmTyDA: OS << "DA"; break;
682 case ImmTyR128: OS << "R128"; break;
683 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000684 case ImmTyOff: OS << "Off"; break;
685 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000686 case ImmTyExpCompr: OS << "ExpCompr"; break;
687 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000688 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000689 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000690 case ImmTyInterpSlot: OS << "InterpSlot"; break;
691 case ImmTyInterpAttr: OS << "InterpAttr"; break;
692 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000693 case ImmTyOpSel: OS << "OpSel"; break;
694 case ImmTyOpSelHi: OS << "OpSelHi"; break;
695 case ImmTyNegLo: OS << "NegLo"; break;
696 case ImmTyNegHi: OS << "NegHi"; break;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000697 case ImmTySwizzle: OS << "Swizzle"; break;
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000698 case ImmTyHigh: OS << "High"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000699 }
700 }
701
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000702 void print(raw_ostream &OS) const override {
703 switch (Kind) {
704 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000705 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000706 break;
707 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000708 OS << '<' << getImm();
709 if (getImmTy() != ImmTyNone) {
710 OS << " type: "; printImmTy(OS, getImmTy());
711 }
Sam Kolton945231a2016-06-10 09:57:59 +0000712 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000713 break;
714 case Token:
715 OS << '\'' << getToken() << '\'';
716 break;
717 case Expression:
718 OS << "<expr " << *Expr << '>';
719 break;
720 }
721 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000722
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000723 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
724 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000725 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000726 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000727 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000728 Op->Imm.Val = Val;
729 Op->Imm.IsFPImm = IsFPImm;
730 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000731 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000732 Op->StartLoc = Loc;
733 Op->EndLoc = Loc;
734 return Op;
735 }
736
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000737 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
738 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000739 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000740 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000741 Res->Tok.Data = Str.data();
742 Res->Tok.Length = Str.size();
743 Res->StartLoc = Loc;
744 Res->EndLoc = Loc;
745 return Res;
746 }
747
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000748 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
749 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000750 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000751 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000752 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000753 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000754 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000755 Op->Reg.IsForcedVOP3 = ForceVOP3;
756 Op->StartLoc = S;
757 Op->EndLoc = E;
758 return Op;
759 }
760
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000761 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
762 const class MCExpr *Expr, SMLoc S) {
763 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000764 Op->Expr = Expr;
765 Op->StartLoc = S;
766 Op->EndLoc = S;
767 return Op;
768 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000769};
770
Sam Kolton945231a2016-06-10 09:57:59 +0000771raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
772 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
773 return OS;
774}
775
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000776//===----------------------------------------------------------------------===//
777// AsmParser
778//===----------------------------------------------------------------------===//
779
Artem Tamazova01cce82016-12-27 16:00:11 +0000780// Holds info related to the current kernel, e.g. count of SGPRs used.
781// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
782// .amdgpu_hsa_kernel or at EOF.
783class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000784 int SgprIndexUnusedMin = -1;
785 int VgprIndexUnusedMin = -1;
786 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000787
788 void usesSgprAt(int i) {
789 if (i >= SgprIndexUnusedMin) {
790 SgprIndexUnusedMin = ++i;
791 if (Ctx) {
792 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
793 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
794 }
795 }
796 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000797
Artem Tamazova01cce82016-12-27 16:00:11 +0000798 void usesVgprAt(int i) {
799 if (i >= VgprIndexUnusedMin) {
800 VgprIndexUnusedMin = ++i;
801 if (Ctx) {
802 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
803 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
804 }
805 }
806 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000807
Artem Tamazova01cce82016-12-27 16:00:11 +0000808public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000809 KernelScopeInfo() = default;
810
Artem Tamazova01cce82016-12-27 16:00:11 +0000811 void initialize(MCContext &Context) {
812 Ctx = &Context;
813 usesSgprAt(SgprIndexUnusedMin = -1);
814 usesVgprAt(VgprIndexUnusedMin = -1);
815 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000816
Artem Tamazova01cce82016-12-27 16:00:11 +0000817 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
818 switch (RegKind) {
819 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
820 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
821 default: break;
822 }
823 }
824};
825
// AMDGPU-specific assembly parser implementing the MCTargetAsmParser
// interface. Most members declared here are defined out of line later in
// this file; the declaration also hosts small inline helpers and subtarget
// feature predicates.
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCAsmParser &Parser;

  // Number of extra operands parsed after the first optional operand.
  // This may be necessary to skip hardcoded mandatory operands.
  static const unsigned MAX_OPR_LOOKAHEAD = 8;

  // Encoding restrictions requested by the mnemonic; see the
  // setForced*/isForced* accessors below.
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  // Tracks SGPR/VGPR usage of the kernel currently being assembled.
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Assembler directive handlers (.hsa_code_object_*, .amd_kernel_code_t,
  // .amdgpu_hsa_kernel, metadata directives, ...).
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadata();

  // Register list/range parsing helpers.
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  // Shared MCInst conversion helpers for MUBUF and DS instructions.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  // Maps an optional-immediate type to its index in the parsed operand list.
  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

  // The constructor also defines the .option.machine_version_{major,minor,
  // stepping} symbols from the selected ISA version.
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is no suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaInfo::IsaVersion ISA =
          AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym =
          Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
    }
    KernelScope.initialize(getContext());
  }

  // Subtarget feature / generation predicates.
  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  // Encoding constraints derived from the mnemonic suffix.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  // Top-level parsing entry points (mostly MCTargetAsmParser overrides).
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  // Generic helpers for prefixed/named operands.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  // Immediate / register operand parsing (with optional input modifiers).
  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  // DS / EXP instruction conversion.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Id plus a flag recording whether it was written symbolically; used by
  // the sendmsg/hwreg construct parsers below.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-match validation of the assembled instruction.
  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst);
  bool validateMIMGR128(const MCInst &Inst);
  bool validateMIMGD16(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  // Low-level token scanning utilities.
  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  bool parseExpr(int64_t &Imm);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // swizzle() macro/offset parsing.
  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  // MUBUF/MTBUF conversion wrappers around cvtMubufImpl.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  // Factories for optional operands omitted in the assembly source.
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultD16() const;
  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // VOP3 / VOP3P conversion.
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);

  // MIMG conversion.
  void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
               bool IsAtomic = false);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP parsing/conversion.
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA parsing/conversion.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);
};
1145
// Describes an optional instruction operand: its textual name, the
// immediate type it produces, whether it is a simple 0/1 flag, and an
// optional conversion callback applied to the parsed value (may be null).
struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};
1152
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001153} // end anonymous namespace
1154
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001155// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001156static const fltSemantics *getFltSemantics(unsigned Size) {
1157 switch (Size) {
1158 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001159 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001160 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001161 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001162 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001163 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001164 default:
1165 llvm_unreachable("unsupported fp type");
1166 }
1167}
1168
Matt Arsenault4bd72362016-12-10 00:39:12 +00001169static const fltSemantics *getFltSemantics(MVT VT) {
1170 return getFltSemantics(VT.getSizeInBits() / 8);
1171}
1172
// Return the IEEE float semantics matching the width of the given
// AMDGPU::OPERAND_* encoding. Integer operand types map to the
// same-width float semantics.
static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  // 32-bit operand types.
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    return &APFloat::IEEEsingle();
  // 64-bit operand types.
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return &APFloat::IEEEdouble();
  // 16-bit and packed 16-bit operand types.
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
1196
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001197//===----------------------------------------------------------------------===//
1198// Operand
1199//===----------------------------------------------------------------------===//
1200
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001201static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1202 bool Lost;
1203
1204 // Convert literal to single precision
1205 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1206 APFloat::rmNearestTiesToEven,
1207 &Lost);
1208 // We allow precision lost but not overflow or underflow
1209 if (Status != APFloat::opOK &&
1210 Lost &&
1211 ((Status & APFloat::opOverflow) != 0 ||
1212 (Status & APFloat::opUnderflow) != 0)) {
1213 return false;
1214 }
1215
1216 return true;
1217}
1218
// Return true if this immediate can be encoded as an inline constant for
// an operand of the given type, honoring the subtarget's support for the
// 1/(2*pi) inline value.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operand: the double-precision literal must first convert to
    // the operand's semantics without over/underflow.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1268
// Return true if this immediate can be encoded as a literal constant for
// an operand of the given type (as opposed to an inline constant).
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as a literal.
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept these literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower FP operand: accept the literal if it converts without
  // over/underflow (precision loss is allowed).
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1309
1310bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001311 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001312}
1313
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001314bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001315 if (AsmParser->isVI())
1316 return isVReg();
1317 else if (AsmParser->isGFX9())
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001318 return isRegKind() || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001319 else
1320 return false;
1321}
1322
// SDWA operand predicate specialized for the f16 element type.
bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}
1326
// SDWA operand predicate specialized for the f32 element type.
bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}
1330
// SDWA operand predicate specialized for the i16 element type.
bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}
1334
// SDWA operand predicate specialized for the i32 element type.
bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}
1338
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001339uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1340{
1341 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1342 assert(Size == 2 || Size == 4 || Size == 8);
1343
1344 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1345
1346 if (Imm.Mods.Abs) {
1347 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001348 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001349 if (Imm.Mods.Neg) {
1350 Val ^= FpSignMask;
1351 }
1352
1353 return Val;
1354}
1355
1356void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001357 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1358 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001359 addLiteralImmOperand(Inst, Imm.Val,
1360 ApplyModifiers &
1361 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001362 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001363 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1364 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001365 }
1366}
1367
// Append 'Val' to 'Inst' as a literal or inline-constant source operand.
// 'Val' holds either the raw bits of an fp literal (when Imm.IsFPImm) or an
// integer literal. When ApplyModifiers is set, abs/neg FP input modifiers
// are folded into the bits first. Values that qualify as inline constants
// are emitted verbatim; other values are truncated/converted to the width
// the operand type expects.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // FP literals are stored as 64-bit doubles; integer literals use the
    // operand's own size.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      // 64-bit operand: emit as-is if the bits form an inline constant.
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of the double are encoded.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      // Packed 16-bit operands replicate the value into both halves.
      if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
          OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
        ImmVal |= (ImmVal << 16);
      }

      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: truncate to 32 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: only the low 32 bits can be encoded as a literal.
    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: truncate to 16 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    // Packed 16-bit operands must already be inlinable; replicate the value
    // into both halves of the 32-bit encoding.
    auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
    assert(AMDGPU::isInlinableLiteral16(LiteralVal,
                                        AsmParser->hasInv2PiInlineImm()));

    uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
                      static_cast<uint32_t>(LiteralVal);
    Inst.addOperand(MCOperand::createImm(ImmVal));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1506
Matt Arsenault4bd72362016-12-10 00:39:12 +00001507template <unsigned Bitwidth>
1508void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001509 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001510
1511 if (!Imm.IsFPImm) {
1512 // We got int literal token.
1513 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1514 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001515 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001516
1517 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001518 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001519 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1520 APFloat::rmNearestTiesToEven, &Lost);
1521 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001522}
1523
1524void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1525 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1526}
1527
1528//===----------------------------------------------------------------------===//
1529// AsmParser
1530//===----------------------------------------------------------------------===//
1531
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001532static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1533 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001534 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001535 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001536 case 1: return AMDGPU::VGPR_32RegClassID;
1537 case 2: return AMDGPU::VReg_64RegClassID;
1538 case 3: return AMDGPU::VReg_96RegClassID;
1539 case 4: return AMDGPU::VReg_128RegClassID;
1540 case 8: return AMDGPU::VReg_256RegClassID;
1541 case 16: return AMDGPU::VReg_512RegClassID;
1542 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001543 } else if (Is == IS_TTMP) {
1544 switch (RegWidth) {
1545 default: return -1;
1546 case 1: return AMDGPU::TTMP_32RegClassID;
1547 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001548 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001549 case 8: return AMDGPU::TTMP_256RegClassID;
1550 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001551 }
1552 } else if (Is == IS_SGPR) {
1553 switch (RegWidth) {
1554 default: return -1;
1555 case 1: return AMDGPU::SGPR_32RegClassID;
1556 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001557 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001558 case 8: return AMDGPU::SGPR_256RegClassID;
1559 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001560 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001561 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001562 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001563}
1564
// Look up the register enum value for a "special" (non-numbered) register
// name such as "vcc" or "exec". Returns 0 when the name is not one of the
// special registers.
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("xnack_mask", AMDGPU::XNACK_MASK)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
    .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}
1589
Eugene Zelenko66203762017-01-21 00:53:49 +00001590bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1591 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001592 auto R = parseRegister();
1593 if (!R) return true;
1594 assert(R->isReg());
1595 RegNo = R->getReg();
1596 StartLoc = R->getStartLoc();
1597 EndLoc = R->getEndLoc();
1598 return false;
1599}
1600
Eugene Zelenko66203762017-01-21 00:53:49 +00001601bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1602 RegisterKind RegKind, unsigned Reg1,
1603 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001604 switch (RegKind) {
1605 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001606 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1607 Reg = AMDGPU::EXEC;
1608 RegWidth = 2;
1609 return true;
1610 }
1611 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1612 Reg = AMDGPU::FLAT_SCR;
1613 RegWidth = 2;
1614 return true;
1615 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001616 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1617 Reg = AMDGPU::XNACK_MASK;
1618 RegWidth = 2;
1619 return true;
1620 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001621 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1622 Reg = AMDGPU::VCC;
1623 RegWidth = 2;
1624 return true;
1625 }
1626 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1627 Reg = AMDGPU::TBA;
1628 RegWidth = 2;
1629 return true;
1630 }
1631 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1632 Reg = AMDGPU::TMA;
1633 RegWidth = 2;
1634 return true;
1635 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001636 return false;
1637 case IS_VGPR:
1638 case IS_SGPR:
1639 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001640 if (Reg1 != Reg + RegWidth) {
1641 return false;
1642 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001643 RegWidth++;
1644 return true;
1645 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001646 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001647 }
1648}
1649
// Parse one register reference into (RegKind, Reg, RegNum, RegWidth).
// Accepted forms:
//   - special registers (vcc, exec, ...) via getSpecialRegForName()
//   - single registers: vN, sN, ttmpN
//   - ranges: v[X:Y], s[X:Y], ttmp[X:Y] (the ":Y" part is optional)
//   - lists of consecutive single registers: [s0,s1,s2,s3]
// On success, Reg holds the resolved register enum value; if DwordRegIndex
// is non-null it receives the register's first dword index (0 for special
// registers). Returns false on any syntax, alignment or range error.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Numbered register: determine the kind from the name prefix and the
      // offset where the number (if any) starts.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow the low bound.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    // Parse the first element, then recurse for each subsequent element and
    // fold it into (Reg, RegWidth) via AddNextRegisterToList().
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        // List elements must be single registers of the same kind.
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve (RegKind, RegNum, RegWidth) to the actual register enum value.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    // Convert the dword index to an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1778
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001779std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001780 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001781 SMLoc StartLoc = Tok.getLoc();
1782 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001783 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001784 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001785
Artem Tamazova01cce82016-12-27 16:00:11 +00001786 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001787 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001788 }
Artem Tamazova01cce82016-12-27 16:00:11 +00001789 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001790 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001791}
1792
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001793bool
1794AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1795 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1796 (getLexer().getKind() == AsmToken::Integer ||
1797 getLexer().getKind() == AsmToken::Real)) {
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001798 // This is a workaround for handling operands like these:
1799 // |1.0|
1800 // |-1|
1801 // This syntax is not compatible with syntax of standard
1802 // MC expressions (due to the trailing '|').
1803
1804 SMLoc EndLoc;
1805 const MCExpr *Expr;
1806
1807 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1808 return true;
1809 }
1810
1811 return !Expr->evaluateAsAbsolute(Val);
1812 }
1813
1814 return getParser().parseAbsoluteExpression(Val);
1815}
1816
Alex Bradbury58eba092016-11-01 16:32:05 +00001817OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001818AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001819 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001820 bool Minus = false;
1821 if (getLexer().getKind() == AsmToken::Minus) {
Dmitry Preobrazhensky471adf72017-12-22 18:03:35 +00001822 const AsmToken NextToken = getLexer().peekTok();
1823 if (!NextToken.is(AsmToken::Integer) &&
1824 !NextToken.is(AsmToken::Real)) {
1825 return MatchOperand_NoMatch;
1826 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00001827 Minus = true;
1828 Parser.Lex();
1829 }
1830
1831 SMLoc S = Parser.getTok().getLoc();
1832 switch(getLexer().getKind()) {
1833 case AsmToken::Integer: {
1834 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001835 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001836 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001837 if (Minus)
1838 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001839 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001840 return MatchOperand_Success;
1841 }
1842 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001843 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001844 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001845 return MatchOperand_ParseFail;
1846
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001847 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001848 if (Minus)
1849 F.changeSign();
1850 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001851 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001852 AMDGPUOperand::ImmTyNone, true));
1853 return MatchOperand_Success;
1854 }
1855 default:
Dmitry Preobrazhensky471adf72017-12-22 18:03:35 +00001856 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001857 }
1858}
1859
Alex Bradbury58eba092016-11-01 16:32:05 +00001860OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001861AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001862 if (auto R = parseRegister()) {
1863 assert(R->isReg());
1864 R->Reg.IsForcedVOP3 = isForcedVOP3();
1865 Operands.push_back(std::move(R));
1866 return MatchOperand_Success;
1867 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001868 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001869}
1870
Alex Bradbury58eba092016-11-01 16:32:05 +00001871OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001872AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1873 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001874 if (res != MatchOperand_NoMatch) {
1875 return res;
1876 }
1877
1878 return parseReg(Operands);
1879}
1880
// Parse a register or immediate operand together with optional FP input
// modifiers, accepting both syntaxes: '-x' / '|x|' and 'neg(x)' / 'abs(x)'.
// The closing '|' or ')' is consumed after the inner operand is parsed.
// When AllowImm is false, only a register may follow the modifiers.
// Parsed modifiers are recorded on the newly pushed operand.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  // Negate/Abs: '-'/'|...|' syntax. Negate2/Abs2: 'neg(...)'/'abs(...)'.
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Besides being counter-intuitive, such use of a floating-point NEG
    // modifier results in different meaning of integer literals used with
    // VOP1/2/C and VOP3, for example:
    // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
    // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
    // Negative fp literals should be handled likewise for uniformity
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    // 'neg(' cannot be combined with a leading '-'.
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    // '|' bars cannot be nested inside 'abs(...)'.
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  // Parse the inner operand itself.
  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume the closing delimiters and record the modifiers.
  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1989
Alex Bradbury58eba092016-11-01 16:32:05 +00001990OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00001991AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1992 bool AllowImm) {
Sam Kolton945231a2016-06-10 09:57:59 +00001993 bool Sext = false;
1994
Eugene Zelenko66203762017-01-21 00:53:49 +00001995 if (getLexer().getKind() == AsmToken::Identifier &&
1996 Parser.getTok().getString() == "sext") {
Sam Kolton945231a2016-06-10 09:57:59 +00001997 Parser.Lex();
1998 Sext = true;
1999 if (getLexer().isNot(AsmToken::LParen)) {
2000 Error(Parser.getTok().getLoc(), "expected left paren after sext");
2001 return MatchOperand_ParseFail;
2002 }
2003 Parser.Lex();
2004 }
2005
Sam Kolton9772eb32017-01-11 11:46:30 +00002006 OperandMatchResultTy Res;
2007 if (AllowImm) {
2008 Res = parseRegOrImm(Operands);
2009 } else {
2010 Res = parseReg(Operands);
2011 }
Sam Kolton945231a2016-06-10 09:57:59 +00002012 if (Res != MatchOperand_Success) {
2013 return Res;
2014 }
2015
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002016 AMDGPUOperand::Modifiers Mods;
Sam Kolton945231a2016-06-10 09:57:59 +00002017 if (Sext) {
2018 if (getLexer().isNot(AsmToken::RParen)) {
2019 Error(Parser.getTok().getLoc(), "expected closing parentheses");
2020 return MatchOperand_ParseFail;
2021 }
2022 Parser.Lex();
2023 Mods.Sext = true;
2024 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00002025
Sam Kolton945231a2016-06-10 09:57:59 +00002026 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00002027 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00002028 Op.setModifiers(Mods);
2029 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002030
Sam Kolton945231a2016-06-10 09:57:59 +00002031 return MatchOperand_Success;
2032}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002033
Sam Kolton9772eb32017-01-11 11:46:30 +00002034OperandMatchResultTy
2035AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2036 return parseRegOrImmWithFPInputMods(Operands, false);
2037}
2038
2039OperandMatchResultTy
2040AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
2041 return parseRegOrImmWithIntInputMods(Operands, false);
2042}
2043
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002044OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
2045 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2046 if (Reg) {
2047 Operands.push_back(std::move(Reg));
2048 return MatchOperand_Success;
2049 }
2050
2051 const AsmToken &Tok = Parser.getTok();
2052 if (Tok.getString() == "off") {
2053 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
2054 AMDGPUOperand::ImmTyOff, false));
2055 Parser.Lex();
2056 return MatchOperand_Success;
2057 }
2058
2059 return MatchOperand_NoMatch;
2060}
2061
Tom Stellard45bb48e2015-06-13 03:28:10 +00002062unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002063 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2064
2065 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00002066 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2067 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2068 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00002069 return Match_InvalidOperand;
2070
Tom Stellard88e0b252015-10-06 15:57:53 +00002071 if ((TSFlags & SIInstrFlags::VOP3) &&
2072 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2073 getForcedEncodingSize() != 64)
2074 return Match_PreferE32;
2075
Sam Koltona568e3d2016-12-22 12:57:41 +00002076 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2077 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002078 // v_mac_f32/16 allow only dst_sel == DWORD;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002079 auto OpNum =
2080 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
Sam Koltona3ec5c12016-10-07 14:46:06 +00002081 const auto &Op = Inst.getOperand(OpNum);
2082 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2083 return Match_InvalidOperand;
2084 }
2085 }
2086
Matt Arsenaultfd023142017-06-12 15:55:58 +00002087 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
2088 // FIXME: Produces error without correct column reported.
2089 auto OpNum =
2090 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
2091 const auto &Op = Inst.getOperand(OpNum);
2092 if (Op.getImm() != 0)
2093 return Match_InvalidOperand;
2094 }
2095
Tom Stellard45bb48e2015-06-13 03:28:10 +00002096 return Match_Success;
2097}
2098
Matt Arsenault5f45e782017-01-09 18:44:11 +00002099// What asm variants we should check
2100ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2101 if (getForcedEncodingSize() == 32) {
2102 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2103 return makeArrayRef(Variants);
2104 }
2105
2106 if (isForcedVOP3()) {
2107 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2108 return makeArrayRef(Variants);
2109 }
2110
2111 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002112 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2113 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002114 return makeArrayRef(Variants);
2115 }
2116
2117 if (isForcedDPP()) {
2118 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2119 return makeArrayRef(Variants);
2120 }
2121
2122 static const unsigned Variants[] = {
2123 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002124 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002125 };
2126
2127 return makeArrayRef(Variants);
2128}
2129
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002130unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2131 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2132 const unsigned Num = Desc.getNumImplicitUses();
2133 for (unsigned i = 0; i < Num; ++i) {
2134 unsigned Reg = Desc.ImplicitUses[i];
2135 switch (Reg) {
2136 case AMDGPU::FLAT_SCR:
2137 case AMDGPU::VCC:
2138 case AMDGPU::M0:
2139 return Reg;
2140 default:
2141 break;
2142 }
2143 }
2144 return AMDGPU::NoRegister;
2145}
2146
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 support no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
//
// Returns true if the immediate held by operand OpIdx fits the
// target's inline-constant encoding for that operand's expected size.
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  // Only SI source operands may hold inline constants.
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    // Packed 16-bit vector operands (v2i16/v2f16) have their own
    // inlinability rules; plain 16-bit operands use the scalar rules.
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
2182
2183bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2184 const MCOperand &MO = Inst.getOperand(OpIdx);
2185 if (MO.isImm()) {
2186 return !isInlineConstant(Inst, OpIdx);
2187 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002188 return !MO.isReg() ||
2189 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002190}
2191
// Enforce the hardware rule that a VALU instruction may read at most
// one value over the constant bus (one unique SGPR, or one literal).
// Returns true if Inst is within the limit.
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VALU encodings are subject to the constant-bus limit.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // An implicitly-read SGPR (VCC, M0, FLAT_SCR) also occupies a slot.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Repeated reads of the same SGPR count once; SGPRUsed tracks
          // the most recently counted register.
          // Pairs of registers with a partial intersections like these
          // s0, s[0:1]
          // flat_scratch_lo, flat_scratch
          // flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2244
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002245bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002246 const unsigned Opcode = Inst.getOpcode();
2247 const MCInstrDesc &Desc = MII.get(Opcode);
2248
2249 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2250 if (DstIdx == -1 ||
2251 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2252 return true;
2253 }
2254
2255 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2256
2257 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2258 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2259 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2260
2261 assert(DstIdx != -1);
2262 const MCOperand &Dst = Inst.getOperand(DstIdx);
2263 assert(Dst.isReg());
2264 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2265
2266 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2267
2268 for (int SrcIdx : SrcIndices) {
2269 if (SrcIdx == -1) break;
2270 const MCOperand &Src = Inst.getOperand(SrcIdx);
2271 if (Src.isReg()) {
2272 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2273 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2274 return false;
2275 }
2276 }
2277 }
2278
2279 return true;
2280}
2281
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002282bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2283
2284 const unsigned Opc = Inst.getOpcode();
2285 const MCInstrDesc &Desc = MII.get(Opc);
2286
2287 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2288 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2289 assert(ClampIdx != -1);
2290 return Inst.getOperand(ClampIdx).getImm() == 0;
2291 }
2292
2293 return true;
2294}
2295
// Check that the width of a MIMG vdata register list matches the number
// of 32-bit results implied by dmask (plus one dword for tfe, halved
// for packed d16). Returns true if the sizes agree.
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
  // tfe appends one extra dword holding the error flag.
  unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
  // dmask == 0 behaves as dmask == 1 for sizing purposes.
  if (DMask == 0)
    DMask = 1;

  // Gather4 always produces 4 components regardless of dmask.
  unsigned DataSize =
    (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
  if (hasPackedD16()) {
    // With packed d16, two 16-bit components share one 32-bit register.
    int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
    if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
      DataSize = (DataSize + 1) / 2;
  }

  // VDataSize is in bytes; each result component occupies 4 bytes.
  return (VDataSize / 4) == DataSize + TFESize;
}
2328
2329bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2330
2331 const unsigned Opc = Inst.getOpcode();
2332 const MCInstrDesc &Desc = MII.get(Opc);
2333
2334 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2335 return true;
2336 if (!Desc.mayLoad() || !Desc.mayStore())
2337 return true; // Not atomic
2338
2339 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2340 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2341
2342 // This is an incomplete check because image_atomic_cmpswap
2343 // may only use 0x3 and 0xf while other atomic operations
2344 // may use 0x1 and 0x3. However these limitations are
2345 // verified when we check that dmask matches dst size.
2346 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2347}
2348
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002349bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2350
2351 const unsigned Opc = Inst.getOpcode();
2352 const MCInstrDesc &Desc = MII.get(Opc);
2353
2354 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2355 return true;
2356
2357 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2358 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2359
2360 // GATHER4 instructions use dmask in a different fashion compared to
2361 // other MIMG instructions. The only useful DMASK values are
2362 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2363 // (red,red,red,red) etc.) The ISA document doesn't mention
2364 // this.
2365 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2366}
2367
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002368bool AMDGPUAsmParser::validateMIMGR128(const MCInst &Inst) {
2369
2370 const unsigned Opc = Inst.getOpcode();
2371 const MCInstrDesc &Desc = MII.get(Opc);
2372
2373 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2374 return true;
2375
2376 int Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::r128);
2377 assert(Idx != -1);
2378
2379 bool R128 = (Inst.getOperand(Idx).getImm() != 0);
2380
2381 return !R128 || hasMIMG_R128();
2382}
2383
2384bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2385
2386 const unsigned Opc = Inst.getOpcode();
2387 const MCInstrDesc &Desc = MII.get(Opc);
2388
2389 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2390 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002391
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002392 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2393 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2394 if (isCI() || isSI())
2395 return false;
2396 }
2397
2398 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002399}
2400
// Run all target-specific semantic checks on a successfully matched
// instruction. On the first violation, reports an error at IDLoc and
// returns false; returns true when the instruction is valid.
// The order of checks determines which diagnostic the user sees.
bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
                                          const SMLoc &IDLoc) {
  if (!validateConstantBusLimitations(Inst)) {
    Error(IDLoc,
      "invalid operand (violates constant bus restrictions)");
    return false;
  }
  if (!validateEarlyClobberLimitations(Inst)) {
    Error(IDLoc,
      "destination must be different than all sources");
    return false;
  }
  if (!validateIntClampSupported(Inst)) {
    Error(IDLoc,
      "integer clamping is not supported on this GPU");
    return false;
  }
  if (!validateMIMGR128(Inst)) {
    Error(IDLoc,
      "r128 modifier is not supported on this GPU");
    return false;
  }
  // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
  if (!validateMIMGD16(Inst)) {
    Error(IDLoc,
      "d16 modifier is not supported on this GPU");
    return false;
  }
  if (!validateMIMGDataSize(Inst)) {
    Error(IDLoc,
      "image data size does not match dmask and tfe");
    return false;
  }
  if (!validateMIMGAtomicDMask(Inst)) {
    Error(IDLoc,
      "invalid atomic image dmask");
    return false;
  }
  if (!validateMIMGGatherDMask(Inst)) {
    Error(IDLoc,
      "invalid image_gather dmask: only one bit must be set");
    return false;
  }

  return true;
}
2447
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00002448static std::string AMDGPUMnemonicSpellCheck(StringRef S, uint64_t FBS,
2449 unsigned VariantID = 0);
2450
// Match the parsed operands against every applicable encoding variant,
// validate, and emit the instruction. Keeps the most specific failure
// status across variants so the eventual diagnostic is as helpful as
// possible. Returns true on error (an error has been reported).
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // First successful variant wins.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // Run target-specific semantic checks before emitting.
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    // Suggest a close mnemonic when the spelling looks wrong.
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    // Point the error at the offending operand when we know which one.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2520
Artem Tamazov25478d82016-12-29 15:41:52 +00002521bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2522 int64_t Tmp = -1;
2523 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2524 return true;
2525 }
2526 if (getParser().parseAbsoluteExpression(Tmp)) {
2527 return true;
2528 }
2529 Ret = static_cast<uint32_t>(Tmp);
2530 return false;
2531}
2532
Tom Stellard347ac792015-06-26 21:15:07 +00002533bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2534 uint32_t &Minor) {
Artem Tamazov25478d82016-12-29 15:41:52 +00002535 if (ParseAsAbsoluteExpression(Major))
Tom Stellard347ac792015-06-26 21:15:07 +00002536 return TokError("invalid major version");
2537
Tom Stellard347ac792015-06-26 21:15:07 +00002538 if (getLexer().isNot(AsmToken::Comma))
2539 return TokError("minor version number required, comma expected");
2540 Lex();
2541
Artem Tamazov25478d82016-12-29 15:41:52 +00002542 if (ParseAsAbsoluteExpression(Minor))
Tom Stellard347ac792015-06-26 21:15:07 +00002543 return TokError("invalid minor version");
2544
Tom Stellard347ac792015-06-26 21:15:07 +00002545 return false;
2546}
2547
2548bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00002549 uint32_t Major;
2550 uint32_t Minor;
2551
2552 if (ParseDirectiveMajorMinor(Major, Minor))
2553 return true;
2554
2555 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2556 return false;
2557}
2558
// Handle ".hsa_code_object_isa". With no arguments the ISA version of
// the targeted GPU is emitted; otherwise the directive must supply
// major, minor, stepping, "vendor", "arch".
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaInfo::IsaVersion ISA =
        AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex(); // Eat the comma.

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex(); // Eat the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex(); // Eat the vendor name string.

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex(); // Eat the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex(); // Eat the arch name string.

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
2611
Tom Stellardff7416b2015-06-26 21:58:31 +00002612bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2613 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00002614 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
2615 // assembly for backwards compatibility.
2616 if (ID == "max_scratch_backing_memory_byte_size") {
2617 Parser.eatToEndOfStatement();
2618 return false;
2619 }
2620
Valery Pykhtindc110542016-03-06 20:25:36 +00002621 SmallString<40> ErrStr;
2622 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00002623 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002624 return TokError(Err.str());
2625 }
Tom Stellardff7416b2015-06-26 21:58:31 +00002626 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00002627 return false;
2628}
2629
2630bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00002631 amd_kernel_code_t Header;
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00002632 AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00002633
2634 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00002635 // Lex EndOfStatement. This is in a while loop, because lexing a comment
2636 // will set the current token to EndOfStatement.
2637 while(getLexer().is(AsmToken::EndOfStatement))
2638 Lex();
2639
2640 if (getLexer().isNot(AsmToken::Identifier))
2641 return TokError("expected value identifier or .end_amd_kernel_code_t");
2642
2643 StringRef ID = getLexer().getTok().getIdentifier();
2644 Lex();
2645
2646 if (ID == ".end_amd_kernel_code_t")
2647 break;
2648
2649 if (ParseAMDKernelCodeTValue(ID, Header))
2650 return true;
2651 }
2652
2653 getTargetStreamer().EmitAMDKernelCodeT(Header);
2654
2655 return false;
2656}
2657
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002658bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2659 if (getLexer().isNot(AsmToken::Identifier))
2660 return TokError("expected symbol name");
2661
2662 StringRef KernelName = Parser.getTok().getString();
2663
2664 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2665 ELF::STT_AMDGPU_HSA_KERNEL);
2666 Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00002667 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002668 return false;
2669}
2670
// Handle ".amd_amdgpu_isa": check that the ISA string written in the
// assembly matches the ISA implied by the current triple/mcpu, then
// emit it via the target streamer.
bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
    return Error(getParser().getTok().getLoc(),
                 ".amd_amdgpu_isa directive is not available on non-amdgcn "
                 "architectures");
  }

  // NOTE(review): the current token is assumed to be a string literal
  // here; confirm a non-string token cannot reach getStringContents().
  auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();

  // Render the ISA version implied by the subtarget for comparison.
  std::string ISAVersionStringFromSTI;
  raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
  IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);

  if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
    return Error(getParser().getTok().getLoc(),
                 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
                 "arguments specified through the command line");
  }

  getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
  Lex();

  return false;
}
2695
// Handle the HSA metadata begin directive: collect the YAML text
// verbatim (whitespace preserved) up to the matching end directive and
// hand the accumulated string to the target streamer.
bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
                 "not available on non-amdhsa OSes")).str());
  }

  std::string HSAMetadataString;
  raw_string_ostream YamlStream(HSAMetadataString);

  // YAML is whitespace-sensitive: keep space tokens while collecting.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Copy leading whitespace through to the YAML stream.
    while (getLexer().is(AsmToken::Space)) {
      YamlStream << getLexer().getTok().getString();
      Lex();
    }

    // Stop when the matching end directive is reached.
    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the rest of the statement plus a statement separator.
    YamlStream << Parser.parseStringToEndOfStatement()
               << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal whitespace handling before reporting or emitting.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(Twine("expected directive ") +
                    Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
  }

  YamlStream.flush();

  if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
    return Error(getParser().getTok().getLoc(), "invalid HSA metadata");

  return false;
}
2744
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00002745bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00002746 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
2747 return Error(getParser().getTok().getLoc(),
2748 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
2749 "not available on non-amdpal OSes")).str());
2750 }
2751
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00002752 PALMD::Metadata PALMetadata;
Tim Renouf72800f02017-10-03 19:03:52 +00002753 for (;;) {
2754 uint32_t Value;
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00002755 if (ParseAsAbsoluteExpression(Value)) {
2756 return TokError(Twine("invalid value in ") +
2757 Twine(PALMD::AssemblerDirective));
2758 }
2759 PALMetadata.push_back(Value);
Tim Renouf72800f02017-10-03 19:03:52 +00002760 if (getLexer().isNot(AsmToken::Comma))
2761 break;
2762 Lex();
2763 }
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00002764 getTargetStreamer().EmitPALMetadata(PALMetadata);
Tim Renouf72800f02017-10-03 19:03:52 +00002765 return false;
2766}
2767
/// Top-level dispatcher for AMDGPU-specific assembler directives.
/// Returns false when the directive was recognized and handled by one of
/// the ParseDirective* helpers (which emit their own diagnostics), and
/// true when the directive is unknown so the generic parser should try it.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amd_amdgpu_isa")
    return ParseDirectiveISAVersion();

  // HSA metadata and PAL metadata directive names come from shared
  // constants so the assembler and streamer always agree on spelling.
  if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
    return ParseDirectiveHSAMetadata();

  if (IDVal == PALMD::AssemblerDirective)
    return ParseDirectivePALMetadata();

  return true;
}
2794
/// Check whether RegNo exists on the current subtarget. Register names
/// come and go across GPU generations (flat_scr, XNACK_MASK, TBA/TMA,
/// TTMPs, high SGPRs), so a name that lexes as a register may still be
/// invalid for the selected GPU.
bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {

  // TTMP12..TTMP15 (and anything aliasing them) only exist on GFX9.
  for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return isGFX9();
  }

  switch (RegNo) {
  // TBA/TMA were removed in GFX9.
  case AMDGPU::TBA:
  case AMDGPU::TBA_LO:
  case AMDGPU::TBA_HI:
  case AMDGPU::TMA:
  case AMDGPU::TMA_LO:
  case AMDGPU::TMA_HI:
    return !isGFX9();
  // XNACK_MASK requires the xnack feature and is absent on SI/CI.
  case AMDGPU::XNACK_MASK:
  case AMDGPU::XNACK_MASK_LO:
  case AMDGPU::XNACK_MASK_HI:
    return !isCI() && !isSI() && hasXNACK();
  default:
    break;
  }

  // Everything remaining is available on CI.
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr on SI.
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}
2845
/// Parse one instruction operand. Tries, in order: the tablegen-generated
/// custom operand parsers, a register or immediate, a general expression,
/// and finally a bare identifier token (which may be an instruction flag
/// such as 'gds').
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Fall back to a symbolic expression (labels, absolute expressions, ...).
  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2884
Sam Kolton05ef1c92016-06-03 10:27:37 +00002885StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2886 // Clear any forced encodings from the previous instruction.
2887 setForcedEncodingSize(0);
2888 setForcedDPP(false);
2889 setForcedSDWA(false);
2890
2891 if (Name.endswith("_e64")) {
2892 setForcedEncodingSize(64);
2893 return Name.substr(0, Name.size() - 4);
2894 } else if (Name.endswith("_e32")) {
2895 setForcedEncodingSize(32);
2896 return Name.substr(0, Name.size() - 4);
2897 } else if (Name.endswith("_dpp")) {
2898 setForcedDPP(true);
2899 return Name.substr(0, Name.size() - 4);
2900 } else if (Name.endswith("_sdwa")) {
2901 setForcedSDWA(true);
2902 return Name.substr(0, Name.size() - 5);
2903 }
2904 return Name;
2905}
2906
/// Parse a complete instruction: mnemonic (with optional encoding suffix)
/// followed by a comma/space separated operand list up to end of statement.
/// On an operand error, emits a diagnostic, skips the rest of the line so
/// parsing can resume at the next statement, and returns true.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Discard the rest of the statement to recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      // Discard the rest of the statement to recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
2940
2941//===----------------------------------------------------------------------===//
2942// Utility functions
2943//===----------------------------------------------------------------------===//
2944
/// Parse "Prefix:<integer>" (with an optional leading minus on the value)
/// and store the result in Int. Returns NoMatch when the current token is
/// not the expected prefix identifier, ParseFail on a malformed construct.
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef Name = Parser.getTok().getString();
    if (!Name.equals(Prefix)) {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();

    // A leading '-' is a separate token, so detect and consume it here
    // and negate the parsed value afterwards.
    bool IsMinus = false;
    if (getLexer().getKind() == AsmToken::Minus) {
      Parser.Lex();
      IsMinus = true;
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;

    if (IsMinus)
      Int = -Int;
    break;
  }
  }
  return MatchOperand_Success;
}
2980
Alex Bradbury58eba092016-11-01 16:32:05 +00002981OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002982AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002983 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002984 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002985 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002986 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002987
Alex Bradbury58eba092016-11-01 16:32:05 +00002988 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002989 if (Res != MatchOperand_Success)
2990 return Res;
2991
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002992 if (ConvertResult && !ConvertResult(Value)) {
2993 return MatchOperand_ParseFail;
2994 }
2995
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002996 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002997 return MatchOperand_Success;
2998}
2999
/// Parse "Prefix:[e0,e1,...]" where each element is 0 or 1 (at most four
/// elements); element I is packed into bit I of the resulting immediate.
/// NOTE(review): the ConvertResult parameter is accepted but never applied
/// in this function — confirm whether per-value conversion was intended.
OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
  const char *Prefix,
  OperandVector &Operands,
  AMDGPUOperand::ImmTy ImmTy,
  bool (*ConvertResult)(int64_t&)) {
  StringRef Name = Parser.getTok().getString();
  if (!Name.equals(Prefix))
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  unsigned Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; I < 4; ++I) {
    if (I != 0) {
      // A ']' after at least one element terminates the list.
      if (getLexer().is(AsmToken::RBrac))
        break;

      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    int64_t Op;
    if (getParser().parseAbsoluteExpression(Op))
      return MatchOperand_ParseFail;

    // Only single-bit element values are legal.
    if (Op != 0 && Op != 1)
      return MatchOperand_ParseFail;
    Val |= (Op << I);
  }

  // Consume the closing ']'.
  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}
3049
/// Parse an optional named flag operand: the bare identifier Name sets the
/// bit to 1, "no"+Name sets it to 0, and end-of-statement yields the
/// default 0 (so trailing optional flags can be omitted).
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        // e.g. "noglc" explicitly clears the bit.
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
3081
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003082static void addOptionalImmOperand(
3083 MCInst& Inst, const OperandVector& Operands,
3084 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3085 AMDGPUOperand::ImmTy ImmT,
3086 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003087 auto i = OptionalIdx.find(ImmT);
3088 if (i != OptionalIdx.end()) {
3089 unsigned Idx = i->second;
3090 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3091 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00003092 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003093 }
3094}
3095
Alex Bradbury58eba092016-11-01 16:32:05 +00003096OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003097AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00003098 if (getLexer().isNot(AsmToken::Identifier)) {
3099 return MatchOperand_NoMatch;
3100 }
3101 StringRef Tok = Parser.getTok().getString();
3102 if (Tok != Prefix) {
3103 return MatchOperand_NoMatch;
3104 }
3105
3106 Parser.Lex();
3107 if (getLexer().isNot(AsmToken::Colon)) {
3108 return MatchOperand_ParseFail;
3109 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003110
Sam Kolton3025e7f2016-04-26 13:33:56 +00003111 Parser.Lex();
3112 if (getLexer().isNot(AsmToken::Identifier)) {
3113 return MatchOperand_ParseFail;
3114 }
3115
3116 Value = Parser.getTok().getString();
3117 return MatchOperand_Success;
3118}
3119
Tom Stellard45bb48e2015-06-13 03:28:10 +00003120//===----------------------------------------------------------------------===//
3121// ds
3122//===----------------------------------------------------------------------===//
3123
/// Build a DS instruction that uses the split offset0/offset1 form.
/// Register operands are appended in source order; the optional immediates
/// offset0, offset1 and gds follow (defaulted to 0 when not written), and
/// the implicit m0 use is added last.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // Operand 0 is the mnemonic token, so start at 1.
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
3147
/// Build a generic DS instruction. Registers are appended in source order;
/// the offset immediate follows (ds_swizzle encodes its offset field as a
/// swizzle pattern, hence the different immediate type), then gds unless
/// the opcode hardcodes it, and finally the implicit m0 use.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  // Operand 0 is the mnemonic token, so start at 1.
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // An explicit 'gds' token in the source also forces the gds form.
    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
3182
/// Build an EXP instruction. Exactly four source slots are expected (a
/// register or 'off' each); their MCInst operand positions are recorded in
/// OperandIdx so the compr form can rewrite them afterwards. The 'en' mask
/// is derived from which slots are enabled (pairs of bits in compr mode).
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // 'off' occupies a source slot with a null register.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // 'done' is matched elsewhere; skip the token here.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // Compressed form: the second written register moves into slot 1 and
    // the upper two slots are unused.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Enable two mask bits per used slot in compr mode, one bit otherwise.
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00003243
3244//===----------------------------------------------------------------------===//
3245// s_waitcnt
3246//===----------------------------------------------------------------------===//
3247
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003248static bool
3249encodeCnt(
3250 const AMDGPU::IsaInfo::IsaVersion ISA,
3251 int64_t &IntVal,
3252 int64_t CntVal,
3253 bool Saturate,
3254 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
3255 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
3256{
3257 bool Failed = false;
3258
3259 IntVal = encode(ISA, IntVal, CntVal);
3260 if (CntVal != decode(ISA, IntVal)) {
3261 if (Saturate) {
3262 IntVal = encode(ISA, IntVal, -1);
3263 } else {
3264 Failed = true;
3265 }
3266 }
3267 return Failed;
3268}
3269
/// Parse one "name(value)" component of an s_waitcnt expression and fold
/// it into IntVal. Recognized names are vmcnt, expcnt and lgkmcnt, each
/// optionally with a "_sat" suffix that clamps an out-of-range value
/// instead of reporting an error. Returns true on any parse error.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  // Remember where the value starts so overflow diagnostics point at it.
  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());

  bool Failed = true;
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  // Components may be joined with '&' or ',' — consume the separator if
  // another component follows.
  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
3319
/// Parse the operand of s_waitcnt: either a raw integer, or a sequence of
/// named components such as "vmcnt(0) expcnt(1)" that parseCnt folds into
/// a single value. The initial value has all count fields at maximum, so
/// unnamed counters do not cause a wait.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
3345
/// Parse the "hwreg(id[, offset, width])" construct of s_getreg/s_setreg.
/// The register id may be a symbolic name or an integer; offset and width
/// are optional and keep their caller-provided defaults when omitted.
/// Returns true on any parse error. Range validation is the caller's job.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up in the IdSymbolic table. Names
    // past ID_SYMBOLIC_FIRST_GFX9_ only exist on GFX9+.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    int Last = ID_SYMBOLIC_LAST_;
    if (isSI() || isCI() || isVI())
      Last = ID_SYMBOLIC_FIRST_GFX9_;
    for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  if (getLexer().is(AsmToken::RParen)) {
    // Short form without offset/width.
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
3410
/// Parse an s_getreg/s_setreg operand: either a raw 16-bit immediate or a
/// hwreg(...) construct whose id/offset/width are packed into the simm16
/// encoding. Out-of-range fields produce a diagnostic but still create an
/// immediate operand so parsing can continue.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    int64_t Offset = OFFSET_DEFAULT_;
    int64_t Width = WIDTH_M1_DEFAULT_ + 1;
    if (parseHwregConstruct(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    // Validate each field; diagnostics are emitted but do not abort (see
    // the note above for the integer case).
    if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
      if (HwReg.IsSymbolic)
        Error(S, "invalid symbolic name of hardware register");
      else
        Error(S, "invalid code of hardware register: only 6-bit values are legal");
    }
    if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    // Pack id/offset/width-1 into the simm16 encoding.
    Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
3453
// Any immediate is acceptable as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
3457
// True only for immediates produced by parseHwreg.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
3461
/// Parse the "sendmsg(msg[, op[, stream_id]])" construct of s_sendmsg.
/// Message and operation may be symbolic names or integers; the stream id
/// is only meaningful (and optional) for GS operations other than nop.
/// Returns true on any parse error; unknown numeric ids end parsing early
/// without error so the caller can diagnose them.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name; the id table has gaps, so only the known
    // entries are compared.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second adjacent integer re-parses into Msg.Id and
    // marks it unknown on failure — presumably rejects malformed input
    // like "sendmsg(1 2"; confirm intended behavior.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    // Messages without an operation field end here.
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation name; the table depends on the message type.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
3557
Matt Arsenault0e8a2992016-12-15 20:40:20 +00003558OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3559 if (getLexer().getKind() != AsmToken::Identifier)
3560 return MatchOperand_NoMatch;
3561
3562 StringRef Str = Parser.getTok().getString();
3563 int Slot = StringSwitch<int>(Str)
3564 .Case("p10", 0)
3565 .Case("p20", 1)
3566 .Case("p0", 2)
3567 .Default(-1);
3568
3569 SMLoc S = Parser.getTok().getLoc();
3570 if (Slot == -1)
3571 return MatchOperand_ParseFail;
3572
3573 Parser.Lex();
3574 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3575 AMDGPUOperand::ImmTyInterpSlot));
3576 return MatchOperand_Success;
3577}
3578
/// Parse an interpolation attribute operand of the form "attrN.c" where N
/// is the attribute number (0-63) and c is a channel in {x, y, z, w}.
/// Produces two immediates: the attribute index and the channel.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the trailing ".x"/".y"/".z"/".w".
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the channel suffix and the "attr" prefix to leave the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  // Diagnose out-of-range attributes but still return Success so parsing
  // continues past the operand.
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
3618
// Emit the common diagnostic for a malformed or out-of-range exp target,
// anchored at the current token.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
3622
// Decode an EXP target name into its hardware encoding:
//   null         -> 9
//   mrtz         -> 8
//   mrt<0..7>    -> 0..7
//   pos<0..3>    -> 12..15
//   param<0..31> -> 32..63
// Out-of-range numbers are diagnosed via errorExpTgt() but still yield
// MatchOperand_Success so parsing can continue past the bad operand.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    // Position exports start at encoding 12.
    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    // Parameter exports start at encoding 32.
    Val += 32;
    return MatchOperand_Success;
  }

  // Accept the printer's spelling for unknown encodings so output can be
  // round-tripped; the value is still diagnosed.
  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3681
3682OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3683 uint8_t Val;
3684 StringRef Str = Parser.getTok().getString();
3685
3686 auto Res = parseExpTgtImpl(Str, Val);
3687 if (Res != MatchOperand_Success)
3688 return Res;
3689
3690 SMLoc S = Parser.getTok().getLoc();
3691 Parser.Lex();
3692
3693 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3694 AMDGPUOperand::ImmTyExpTgt));
3695 return MatchOperand_Success;
3696}
3697
// Parse a sendmsg operand: either a raw 16-bit immediate or a symbolic
// sendmsg(MSG [, OP [, STREAM_ID]]) construct, which is validated and
// packed into the 16-bit encoding (message id, operation id, stream id).
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    // do/while(false) so validation failures can 'break' out while still
    // creating an operand below, keeping the parser in sync.
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
3776
// A sendmsg operand is an immediate tagged with ImmTySendMsg.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
3780
Tom Stellard45bb48e2015-06-13 03:28:10 +00003781//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00003782// parser helpers
3783//===----------------------------------------------------------------------===//
3784
3785bool
3786AMDGPUAsmParser::trySkipId(const StringRef Id) {
3787 if (getLexer().getKind() == AsmToken::Identifier &&
3788 Parser.getTok().getString() == Id) {
3789 Parser.Lex();
3790 return true;
3791 }
3792 return false;
3793}
3794
3795bool
3796AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3797 if (getLexer().getKind() == Kind) {
3798 Parser.Lex();
3799 return true;
3800 }
3801 return false;
3802}
3803
3804bool
3805AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3806 const StringRef ErrMsg) {
3807 if (!trySkipToken(Kind)) {
3808 Error(Parser.getTok().getLoc(), ErrMsg);
3809 return false;
3810 }
3811 return true;
3812}
3813
// Parse an absolute expression into Imm. Returns true on success — note
// this inverts the MCAsmParser convention of returning true on failure.
bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}
3818
3819bool
3820AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3821 SMLoc S = Parser.getTok().getLoc();
3822 if (getLexer().getKind() == AsmToken::String) {
3823 Val = Parser.getTok().getStringContents();
3824 Parser.Lex();
3825 return true;
3826 } else {
3827 Error(S, ErrMsg);
3828 return false;
3829 }
3830}
3831
3832//===----------------------------------------------------------------------===//
3833// swizzle
3834//===----------------------------------------------------------------------===//
3835
3836LLVM_READNONE
3837static unsigned
3838encodeBitmaskPerm(const unsigned AndMask,
3839 const unsigned OrMask,
3840 const unsigned XorMask) {
3841 using namespace llvm::AMDGPU::Swizzle;
3842
3843 return BITMASK_PERM_ENC |
3844 (AndMask << BITMASK_AND_SHIFT) |
3845 (OrMask << BITMASK_OR_SHIFT) |
3846 (XorMask << BITMASK_XOR_SHIFT);
3847}
3848
3849bool
3850AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3851 const unsigned MinVal,
3852 const unsigned MaxVal,
3853 const StringRef ErrMsg) {
3854 for (unsigned i = 0; i < OpNum; ++i) {
3855 if (!skipToken(AsmToken::Comma, "expected a comma")){
3856 return false;
3857 }
3858 SMLoc ExprLoc = Parser.getTok().getLoc();
3859 if (!parseExpr(Op[i])) {
3860 return false;
3861 }
3862 if (Op[i] < MinVal || Op[i] > MaxVal) {
3863 Error(ExprLoc, ErrMsg);
3864 return false;
3865 }
3866 }
3867
3868 return true;
3869}
3870
3871bool
3872AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3873 using namespace llvm::AMDGPU::Swizzle;
3874
3875 int64_t Lane[LANE_NUM];
3876 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3877 "expected a 2-bit lane id")) {
3878 Imm = QUAD_PERM_ENC;
3879 for (auto i = 0; i < LANE_NUM; ++i) {
3880 Imm |= Lane[i] << (LANE_SHIFT * i);
3881 }
3882 return true;
3883 }
3884 return false;
3885}
3886
3887bool
3888AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
3889 using namespace llvm::AMDGPU::Swizzle;
3890
3891 SMLoc S = Parser.getTok().getLoc();
3892 int64_t GroupSize;
3893 int64_t LaneIdx;
3894
3895 if (!parseSwizzleOperands(1, &GroupSize,
3896 2, 32,
3897 "group size must be in the interval [2,32]")) {
3898 return false;
3899 }
3900 if (!isPowerOf2_64(GroupSize)) {
3901 Error(S, "group size must be a power of two");
3902 return false;
3903 }
3904 if (parseSwizzleOperands(1, &LaneIdx,
3905 0, GroupSize - 1,
3906 "lane id must be in the interval [0,group size - 1]")) {
3907 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
3908 return true;
3909 }
3910 return false;
3911}
3912
3913bool
3914AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3915 using namespace llvm::AMDGPU::Swizzle;
3916
3917 SMLoc S = Parser.getTok().getLoc();
3918 int64_t GroupSize;
3919
3920 if (!parseSwizzleOperands(1, &GroupSize,
3921 2, 32, "group size must be in the interval [2,32]")) {
3922 return false;
3923 }
3924 if (!isPowerOf2_64(GroupSize)) {
3925 Error(S, "group size must be a power of two");
3926 return false;
3927 }
3928
3929 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3930 return true;
3931}
3932
3933bool
3934AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3935 using namespace llvm::AMDGPU::Swizzle;
3936
3937 SMLoc S = Parser.getTok().getLoc();
3938 int64_t GroupSize;
3939
3940 if (!parseSwizzleOperands(1, &GroupSize,
3941 1, 16, "group size must be in the interval [1,16]")) {
3942 return false;
3943 }
3944 if (!isPowerOf2_64(GroupSize)) {
3945 Error(S, "group size must be a power of two");
3946 return false;
3947 }
3948
3949 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3950 return true;
3951}
3952
3953bool
3954AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
3955 using namespace llvm::AMDGPU::Swizzle;
3956
3957 if (!skipToken(AsmToken::Comma, "expected a comma")) {
3958 return false;
3959 }
3960
3961 StringRef Ctl;
3962 SMLoc StrLoc = Parser.getTok().getLoc();
3963 if (!parseString(Ctl)) {
3964 return false;
3965 }
3966 if (Ctl.size() != BITMASK_WIDTH) {
3967 Error(StrLoc, "expected a 5-character mask");
3968 return false;
3969 }
3970
3971 unsigned AndMask = 0;
3972 unsigned OrMask = 0;
3973 unsigned XorMask = 0;
3974
3975 for (size_t i = 0; i < Ctl.size(); ++i) {
3976 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
3977 switch(Ctl[i]) {
3978 default:
3979 Error(StrLoc, "invalid mask");
3980 return false;
3981 case '0':
3982 break;
3983 case '1':
3984 OrMask |= Mask;
3985 break;
3986 case 'p':
3987 AndMask |= Mask;
3988 break;
3989 case 'i':
3990 AndMask |= Mask;
3991 XorMask |= Mask;
3992 break;
3993 }
3994 }
3995
3996 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
3997 return true;
3998}
3999
4000bool
4001AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
4002
4003 SMLoc OffsetLoc = Parser.getTok().getLoc();
4004
4005 if (!parseExpr(Imm)) {
4006 return false;
4007 }
4008 if (!isUInt<16>(Imm)) {
4009 Error(OffsetLoc, "expected a 16-bit offset");
4010 return false;
4011 }
4012 return true;
4013}
4014
// Parse the parenthesized argument of a "swizzle(...)" macro, dispatching
// on the mode name (QUAD_PERM, BITMASK_PERM, BROADCAST, SWAP, REVERSE)
// via the shared IdSymbolic table. On entry "swizzle" has already been
// consumed; Imm receives the encoded swizzle value.
bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parentheses")) {

    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
  }

  return false;
}
4043
// Parse the DS swizzle operand: either "offset:swizzle(...)" (macro form)
// or "offset:<imm>" (raw 16-bit value). When no "offset" prefix is
// present, fall through to parsing other optional operands.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    // Create the operand even on failure to keep the operand list in sync.
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
4069
// A swizzle operand is an immediate tagged with ImmTySwizzle.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
4074
4075//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004076// sopp branch targets
4077//===----------------------------------------------------------------------===//
4078
// Parse a SOPP branch target: either an absolute integer expression
// (immediate operand) or a label identifier, which becomes a symbol-ref
// expression operand to be fixed up later.
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      Operands.push_back(AMDGPUOperand::CreateExpr(this,
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                                  Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
4101
4102//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004103// mubuf
4104//===----------------------------------------------------------------------===//
4105
// Default (zero) values for the optional glc/slc/tfe buffer modifiers,
// used by the matcher when the modifier is omitted in the assembly text.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
4117
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004118void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
4119 const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004120 bool IsAtomic,
4121 bool IsAtomicReturn,
4122 bool IsLds) {
4123 bool IsLdsOpcode = IsLds;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004124 bool HasLdsModifier = false;
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004125 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004126 assert(IsAtomicReturn ? IsAtomic : true);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004127
4128 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4129 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4130
4131 // Add the register arguments
4132 if (Op.isReg()) {
4133 Op.addRegOperands(Inst, 1);
4134 continue;
4135 }
4136
4137 // Handle the case where soffset is an immediate
4138 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
4139 Op.addImmOperands(Inst, 1);
4140 continue;
4141 }
4142
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004143 HasLdsModifier = Op.isLDS();
4144
Tom Stellard45bb48e2015-06-13 03:28:10 +00004145 // Handle tokens like 'offen' which are sometimes hard-coded into the
4146 // asm string. There are no MCInst operands for these.
4147 if (Op.isToken()) {
4148 continue;
4149 }
4150 assert(Op.isImm());
4151
4152 // Handle optional arguments
4153 OptionalIdx[Op.getImmTy()] = i;
4154 }
4155
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004156 // This is a workaround for an llvm quirk which may result in an
4157 // incorrect instruction selection. Lds and non-lds versions of
4158 // MUBUF instructions are identical except that lds versions
4159 // have mandatory 'lds' modifier. However this modifier follows
4160 // optional modifiers and llvm asm matcher regards this 'lds'
4161 // modifier as an optional one. As a result, an lds version
4162 // of opcode may be selected even if it has no 'lds' modifier.
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004163 if (IsLdsOpcode && !HasLdsModifier) {
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004164 int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
4165 if (NoLdsOpcode != -1) { // Got lds version - correct it.
4166 Inst.setOpcode(NoLdsOpcode);
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004167 IsLdsOpcode = false;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004168 }
4169 }
4170
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004171 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
4172 if (IsAtomicReturn) {
4173 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
4174 Inst.insert(I, *I);
4175 }
4176
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004177 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00004178 if (!IsAtomic) { // glc is hard-coded.
4179 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4180 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004181 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004182
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00004183 if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004184 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4185 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00004186}
4187
// Convert parsed MTBUF (typed buffer) operands into an MCInst, appending
// the optional offset/dfmt/nfmt/glc/slc/tfe modifiers in encoding order.
void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
4225
Tom Stellard45bb48e2015-06-13 03:28:10 +00004226//===----------------------------------------------------------------------===//
4227// mimg
4228//===----------------------------------------------------------------------===//
4229
// Convert parsed MIMG operands into an MCInst. When IsAtomic is set, the
// single destination register is re-added as the data source operand.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Copy the result register operands first.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  // Append the optional modifiers in encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
4269
// MIMG atomic conversion: same as cvtMIMG with the atomic flag set.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  cvtMIMG(Inst, Operands, true);
}
4273
// Default (zero) values for the optional MIMG modifiers, used by the
// matcher when the corresponding modifier is omitted in the assembly text.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultD16() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyD16);
}
4297
Tom Stellard45bb48e2015-06-13 03:28:10 +00004298//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00004299// smrd
4300//===----------------------------------------------------------------------===//
4301
// True if this immediate fits in the 8-bit SMRD offset field.
bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}

// True if this immediate fits in the 20-bit SMRD offset field.
bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
4315
// Default (zero) offset operands for the various SMRD/FLAT offset widths,
// used by the matcher when no offset is written in the assembly text.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
4335
Tom Stellard217361c2015-08-06 19:28:38 +00004336//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00004337// vop3
4338//===----------------------------------------------------------------------===//
4339
// Convert an omod multiplier written in assembly (1, 2 or 4) into its
// encoded value (0, 1 or 2). Returns false for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
4347
// Convert an omod divisor written in assembly (1 or 2) into its encoded
// value (0 or 3). Returns false for any other divisor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
4361
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004362static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4363 if (BoundCtrl == 0) {
4364 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004365 return true;
Matt Arsenault12c53892016-11-15 19:58:54 +00004366 }
4367
4368 if (BoundCtrl == -1) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004369 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004370 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004371 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004372
Tom Stellard45bb48e2015-06-13 03:28:10 +00004373 return false;
4374}
4375
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004376// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00004377static const OptionalOperand AMDGPUOptionalOperandTable[] = {
4378 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
4379 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
4380 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
4381 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
4382 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
4383 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00004384 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004385 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00004386 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
David Stuttard70e8bc12017-06-22 16:29:22 +00004387 {"dfmt", AMDGPUOperand::ImmTyDFMT, false, nullptr},
4388 {"nfmt", AMDGPUOperand::ImmTyNFMT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004389 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
4390 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
4391 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00004392 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00004393 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004394 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
4395 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
4396 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
4397 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
4398 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
4399 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00004400 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004401 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
4402 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
4403 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
4404 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00004405 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
4406 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
4407 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00004408 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004409 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004410 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004411 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
4412 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
4413 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
4414 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004415};
Tom Stellard45bb48e2015-06-13 03:28:10 +00004416
// Entry point for parsing optional operands. Wraps parseOptionalOpr with a
// lookahead workaround (explained below) for hardcoded mandatory operands
// that follow optional ones.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  unsigned size = Operands.size();
  assert(size > 0);

  OperandMatchResultTy res = parseOptionalOpr(Operands);

  // This is a hack to enable hardcoded mandatory operands which follow
  // optional operands.
  //
  // Current design assumes that all operands after the first optional operand
  // are also optional. However implementation of some instructions violates
  // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
  //
  // To alleviate this problem, we have to (implicitly) parse extra operands
  // to make sure autogenerated parser of custom operands never hit hardcoded
  // mandatory operands.

  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {

    // We have parsed the first optional operand.
    // Parse as many operands as necessary to skip all mandatory operands.

    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
      if (res != MatchOperand_Success ||
          getLexer().is(AsmToken::EndOfStatement)) break;
      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
      res = parseOptionalOpr(Operands);
    }
  }

  return res;
}
4449
4450OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00004451 OperandMatchResultTy res;
4452 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
4453 // try to parse any optional operand here
4454 if (Op.IsBit) {
4455 res = parseNamedBit(Op.Name, Operands, Op.Type);
4456 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
4457 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00004458 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
4459 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
4460 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
4461 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00004462 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
4463 res = parseSDWADstUnused(Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004464 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
4465 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
4466 Op.Type == AMDGPUOperand::ImmTyNegLo ||
4467 Op.Type == AMDGPUOperand::ImmTyNegHi) {
4468 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
4469 Op.ConvertResult);
Sam Kolton11de3702016-05-24 12:38:33 +00004470 } else {
4471 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
4472 }
4473 if (res != MatchOperand_NoMatch) {
4474 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004475 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00004476 }
4477 return MatchOperand_NoMatch;
4478}
4479
Matt Arsenault12c53892016-11-15 19:58:54 +00004480OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004481 StringRef Name = Parser.getTok().getString();
4482 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00004483 return parseIntWithPrefix("mul", Operands,
4484 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004485 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004486
4487 if (Name == "div") {
4488 return parseIntWithPrefix("div", Operands,
4489 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4490 }
4491
4492 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004493}
4494
// Convert a VOP3 instruction that uses op_sel on its destination. Runs the
// regular VOP3P conversion first, then folds the destination op_sel bit
// (stored past the last source bit) into src0_modifiers as DST_OP_SEL.
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  // Count how many named src operands (src0..src2) this opcode actually has;
  // the destination's op_sel bit sits at position SrcNum in the op_sel mask.
  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  // If the destination bit is set, record it in src0_modifiers — the
  // encoding carries DST_OP_SEL there rather than in a dst modifier field.
  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}
4518
Sam Koltona3ec5c12016-10-07 14:46:06 +00004519static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4520 // 1. This operand is input modifiers
4521 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4522 // 2. This is not last operand
4523 && Desc.NumOperands > (OpNum + 1)
4524 // 3. Next operand is register class
4525 && Desc.OpInfo[OpNum + 1].RegClass != -1
4526 // 4. Next register is not tied to any other operand
4527 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4528}
4529
// Convert a parsed VOP3 interpolation instruction (v_interp_*). Interp
// slot/attr/chan operands are emitted as raw immediates; trailing optional
// modifiers (high/clamp/omod) are appended only if the opcode defines them.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  // Operands[0] is the mnemonic; copy the defs (destination regs) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with modifiers: emits both the modifier imm and the value.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      // Interpolation-specific operands are plain immediates.
      Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
    } else if (Op.isImmModifier()) {
      // Remember where each optional modifier was parsed.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  // Append optional modifiers in encoding order, but only when the opcode
  // actually has the corresponding named operand.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}
4568
// Core VOP3 conversion: emit destination and source operands into Inst and
// record the index of every optional immediate modifier in OptionalIdx so
// the caller (or this function itself, for clamp/omod) can append them in
// encoding order.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  // Operands[0] is the mnemonic; copy the defs (destination regs) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        // Emits the modifier immediate followed by the source value.
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // Special case v_mac_{f16, f32} and v_fmac_f32 (gfx906):
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0.
  if (Opc == AMDGPU::V_MAC_F32_e64_si ||
      Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi ||
      Opc == AMDGPU::V_FMAC_F32_e64_vi) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
4628
Sam Kolton10ac2fd2017-07-07 15:21:52 +00004629void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00004630 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00004631 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00004632}
4633
// Convert a parsed VOP3P (packed) instruction. First reuses the plain VOP3
// conversion, then appends the packed-specific modifiers (op_sel,
// op_sel_hi, neg_lo, neg_hi) and finally folds their per-source bits into
// the src*_modifiers operands, which is where the encoding carries them.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  // Opcodes with a vdst_in operand read the old destination value; it is
  // tied to vdst, so just duplicate operand 0.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  // op_sel_hi defaults to all-ones for packed ops (select the high half).
  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Distribute bit J of each packed modifier mask into the corresponding
  // srcJ_modifiers operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
4716
Sam Koltondfa29f72016-03-09 12:29:31 +00004717//===----------------------------------------------------------------------===//
4718// dpp
4719//===----------------------------------------------------------------------===//
4720
4721bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00004722 using namespace AMDGPU::DPP;
4723
Sam Koltondfa29f72016-03-09 12:29:31 +00004724 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4725 if (result) {
4726 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00004727 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
4728 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
4729 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
4730 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
4731 (Imm == DppCtrl::WAVE_SHL1) ||
4732 (Imm == DppCtrl::WAVE_ROL1) ||
4733 (Imm == DppCtrl::WAVE_SHR1) ||
4734 (Imm == DppCtrl::WAVE_ROR1) ||
4735 (Imm == DppCtrl::ROW_MIRROR) ||
4736 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
4737 (Imm == DppCtrl::BCAST15) ||
4738 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00004739 }
4740 return false;
4741}
4742
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00004743bool AMDGPUOperand::isGPRIdxMode() const {
4744 return isImm() && isUInt<4>(getImm());
4745}
4746
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00004747bool AMDGPUOperand::isS16Imm() const {
4748 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
4749}
4750
4751bool AMDGPUOperand::isU16Imm() const {
4752 return isImm() && isUInt<16>(getImm());
4753}
4754
// Parse the dpp_ctrl operand. Accepted forms:
//   row_mirror / row_half_mirror                  (bare keywords)
//   quad_perm:[a,b,c,d]  with 0 <= a..d <= 3      (2 bits per lane)
//   row_shl:N / row_shr:N / row_ror:N, 1<=N<=15
//   wave_shl:1 / wave_rol:1 / wave_shr:1 / wave_ror:1
//   row_bcast:15 / row_bcast:31
// On success pushes an ImmTyDppCtrl immediate holding the hardware encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  using namespace AMDGPU::DPP;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = DppCtrl::ROW_MIRROR;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = DppCtrl::ROW_HALF_MIRROR;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy 2 bits each, shifted left.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();
    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Fold the shift/rotate amount into the corresponding encoding base;
      // wave_* controls accept only an amount of exactly 1.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHL0;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHR0;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_ROR0;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = DppCtrl::WAVE_SHL1;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = DppCtrl::WAVE_ROL1;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = DppCtrl::WAVE_SHR1;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = DppCtrl::WAVE_ROR1;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = DppCtrl::BCAST15;
        } else if (Int == 31) {
          Int = DppCtrl::BCAST31;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
4855
Sam Kolton5f10a132016-05-06 11:31:17 +00004856AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004857 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00004858}
4859
Sam Kolton5f10a132016-05-06 11:31:17 +00004860AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004861 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00004862}
4863
Sam Kolton5f10a132016-05-06 11:31:17 +00004864AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004865 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00004866}
4867
4868void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00004869 OptionalImmIndexMap OptionalIdx;
4870
4871 unsigned I = 1;
4872 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4873 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4874 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4875 }
4876
Connor Abbott79f3ade2017-08-07 19:10:56 +00004877 // All DPP instructions with at least one source operand have a fake "old"
4878 // source at the beginning that's tied to the dst operand. Handle it here.
4879 if (Desc.getNumOperands() >= 2)
4880 Inst.addOperand(Inst.getOperand(0));
4881
Sam Koltondfa29f72016-03-09 12:29:31 +00004882 for (unsigned E = Operands.size(); I != E; ++I) {
4883 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4884 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00004885 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00004886 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00004887 // Skip it.
4888 continue;
4889 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00004890 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00004891 } else if (Op.isDPPCtrl()) {
4892 Op.addImmOperands(Inst, 1);
4893 } else if (Op.isImm()) {
4894 // Handle optional arguments
4895 OptionalIdx[Op.getImmTy()] = I;
4896 } else {
4897 llvm_unreachable("Invalid operand type");
4898 }
4899 }
4900
Sam Koltondfa29f72016-03-09 12:29:31 +00004901 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4902 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4903 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
4904}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00004905
Sam Kolton3025e7f2016-04-26 13:33:56 +00004906//===----------------------------------------------------------------------===//
4907// sdwa
4908//===----------------------------------------------------------------------===//
4909
Alex Bradbury58eba092016-11-01 16:32:05 +00004910OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00004911AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
4912 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004913 using namespace llvm::AMDGPU::SDWA;
4914
Sam Kolton3025e7f2016-04-26 13:33:56 +00004915 SMLoc S = Parser.getTok().getLoc();
4916 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004917 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004918
Sam Kolton05ef1c92016-06-03 10:27:37 +00004919 res = parseStringWithPrefix(Prefix, Value);
4920 if (res != MatchOperand_Success) {
4921 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004922 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00004923
Sam Kolton3025e7f2016-04-26 13:33:56 +00004924 int64_t Int;
4925 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004926 .Case("BYTE_0", SdwaSel::BYTE_0)
4927 .Case("BYTE_1", SdwaSel::BYTE_1)
4928 .Case("BYTE_2", SdwaSel::BYTE_2)
4929 .Case("BYTE_3", SdwaSel::BYTE_3)
4930 .Case("WORD_0", SdwaSel::WORD_0)
4931 .Case("WORD_1", SdwaSel::WORD_1)
4932 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004933 .Default(0xffffffff);
4934 Parser.Lex(); // eat last token
4935
4936 if (Int == 0xffffffff) {
4937 return MatchOperand_ParseFail;
4938 }
4939
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004940 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004941 return MatchOperand_Success;
4942}
4943
Alex Bradbury58eba092016-11-01 16:32:05 +00004944OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00004945AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004946 using namespace llvm::AMDGPU::SDWA;
4947
Sam Kolton3025e7f2016-04-26 13:33:56 +00004948 SMLoc S = Parser.getTok().getLoc();
4949 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004950 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004951
4952 res = parseStringWithPrefix("dst_unused", Value);
4953 if (res != MatchOperand_Success) {
4954 return res;
4955 }
4956
4957 int64_t Int;
4958 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004959 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4960 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4961 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004962 .Default(0xffffffff);
4963 Parser.Lex(); // eat last token
4964
4965 if (Int == 0xffffffff) {
4966 return MatchOperand_ParseFail;
4967 }
4968
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004969 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004970 return MatchOperand_Success;
4971}
4972
// Convert a parsed SDWA form of a VOP1 instruction.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
4976
// Convert a parsed SDWA form of a VOP2 instruction.
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
4980
// Convert a parsed SDWA form of a VOP2b instruction (carry in/out via VCC);
// the textual "vcc" operand must be skipped during conversion.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
4984
// Convert a parsed SDWA form of a VOPC instruction. On VI the "vcc"
// destination token is present in the source and must be skipped.
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
4988
// Common conversion for all SDWA instruction forms. Emits defs and sources
// (optionally skipping textual "vcc" operands of VOP2b/VOPC forms), then
// appends the optional SDWA modifiers with their hardware defaults, chosen
// per basic instruction class (VOP1/VOP2/VOPC).
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  // Operands[0] is the mnemonic; copy the defs (destination regs) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with modifiers: emits the modifier imm and the value.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_vi/gfx9 has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00005076
Tom Stellard45bb48e2015-06-13 03:28:10 +00005077/// Force static initialization.
5078extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00005079 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
5080 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00005081}
5082
5083#define GET_REGISTER_MATCHER
5084#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00005085#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00005086#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00005087
// This function must be defined after the auto-generated include above so
// that the MatchClassKind enum is in scope.
5090unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
5091 unsigned Kind) {
5092 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00005093 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00005094 // operand. This method checks if we are given immediate operand but expect to
5095 // get corresponding token.
5096 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
5097 switch (Kind) {
5098 case MCK_addr64:
5099 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
5100 case MCK_gds:
5101 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005102 case MCK_lds:
5103 return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00005104 case MCK_glc:
5105 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
5106 case MCK_idxen:
5107 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
5108 case MCK_offen:
5109 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005110 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00005111 // When operands have expression values, they will return true for isToken,
5112 // because it is not possible to distinguish between a token and an
5113 // expression at parse time. MatchInstructionImpl() will always try to
5114 // match an operand as a token, when isToken returns true, and when the
5115 // name of the expression is not a valid token, the match will fail,
5116 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005117 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
5118 case MCK_SSrcF32:
5119 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00005120 case MCK_SoppBrTarget:
5121 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005122 case MCK_VReg32OrOff:
5123 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00005124 case MCK_InterpSlot:
5125 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
5126 case MCK_Attr:
5127 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
5128 case MCK_AttrChan:
5129 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005130 default:
5131 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00005132 }
5133}