blob: f9f846a38333f103c1843bbf892e768a5a6e0bd3 [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Tom Stellard45bb48e2015-06-13 03:28:10 +00006//
7//===----------------------------------------------------------------------===//
8
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00009#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000014#include "SIInstrInfo.h"
Richard Trieu8ce2ee92019-05-14 21:54:37 +000015#include "TargetInfo/AMDGPUTargetInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000016#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000018#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000020#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000021#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000022#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000023#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000025#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000026#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000028#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000029#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000033#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000037#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000039#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000040#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000043#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000044#include "llvm/Support/AMDGPUMetadata.h"
Scott Linder1e8c2c72018-06-21 19:38:56 +000045#include "llvm/Support/AMDHSAKernelDescriptor.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000047#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000048#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000049#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000050#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000051#include "llvm/Support/SMLoc.h"
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +000052#include "llvm/Support/TargetParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000053#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000054#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000055#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <cstring>
59#include <iterator>
60#include <map>
61#include <memory>
62#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000063
Tom Stellard45bb48e2015-06-13 03:28:10 +000064using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000065using namespace llvm::AMDGPU;
Scott Linder1e8c2c72018-06-21 19:38:56 +000066using namespace llvm::amdhsa;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
68namespace {
69
Sam Kolton1eeb11b2016-09-09 14:44:04 +000070class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000071
// Broad register categories recognized while parsing register operands.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
73
Sam Kolton1eeb11b2016-09-09 14:44:04 +000074//===----------------------------------------------------------------------===//
75// Operand
76//===----------------------------------------------------------------------===//
77
/// A single parsed AMDGPU assembly operand: a token, immediate, register,
/// or (not-yet-resolved) expression. The active alternative is recorded in
/// \c Kind and selects the live member of the union declared below.
class AMDGPUOperand : public MCParsedAsmOperand {
  // Discriminator for the payload union: which kind of operand this is.
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  // Source range of the operand text, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Parser that created this operand (kept so out-of-line predicates such as
  // isInlinableImm can be implemented per-subtarget — see their definitions).
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000094
Sam Kolton945231a2016-06-10 09:57:59 +000095 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000096 bool Abs = false;
97 bool Neg = false;
98 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000099
100 bool hasFPModifiers() const { return Abs || Neg; }
101 bool hasIntModifiers() const { return Sext; }
102 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
103
104 int64_t getFPModifiersOperand() const {
105 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000106 Operand |= Abs ? SISrcMods::ABS : 0u;
107 Operand |= Neg ? SISrcMods::NEG : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000108 return Operand;
109 }
110
111 int64_t getIntModifiersOperand() const {
112 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000113 Operand |= Sext ? SISrcMods::SEXT : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000114 return Operand;
115 }
116
117 int64_t getModifiersOperand() const {
118 assert(!(hasFPModifiers() && hasIntModifiers())
119 && "fp and int modifiers should not be used simultaneously");
120 if (hasFPModifiers()) {
121 return getFPModifiersOperand();
122 } else if (hasIntModifiers()) {
123 return getIntModifiersOperand();
124 } else {
125 return 0;
126 }
127 }
128
129 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
130 };
131
  // Tags distinguishing the many immediate-like operands the parser produces
  // (instruction modifiers, offsets, selects, ...). ImmTyNone marks a plain
  // immediate value with no special role.
  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyLDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyDLC,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyD16,
    ImmTyClampSI,
    ImmTyOModSI,
    // DPP controls.
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    // SDWA operand selects.
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    // MIMG modifiers.
    ImmTyDMask,
    ImmTyDim,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128A16,
    ImmTyLWE,
    // Export (exp) modifiers.
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyFORMAT,
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    // Interpolation operands.
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyGprIdxMode,
    ImmTyEndpgm,
    ImmTyHigh
  };
183
private:
  // Payload for Kind == Token: a non-owning view into the source buffer.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  // Payload for Kind == Immediate.
  struct ImmOp {
    int64_t Val;
    ImmTy Type;     // Role of this immediate (offset, GLC, ...); see ImmTy.
    bool IsFPImm;   // NOTE(review): presumably set when the literal was
                    // written in floating-point syntax — set by the parser.
    Modifiers Mods;
  };

  // Payload for Kind == Register.
  struct RegOp {
    unsigned RegNo;
    Modifiers Mods;
  };

  // Exactly one member is active, selected by Kind.
  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

public:
Tom Stellard45bb48e2015-06-13 03:28:10 +0000210 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000211 if (Kind == Token)
212 return true;
213
214 if (Kind != Expression || !Expr)
215 return false;
216
217 // When parsing operands, we can't always tell if something was meant to be
218 // a token, like 'gds', or an expression that references a global variable.
219 // In this case, we assume the string is an expression, and if we need to
220 // interpret is a token, then we treat the symbol name as the token.
221 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000222 }
223
  bool isImm() const override {
    return Kind == Immediate;
  }

  // Subtarget-dependent immediate classification; defined out of line.
  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;

  bool isRegKind() const {
    return Kind == Register;
  }

  // A "register" in the MC sense: a register operand with no abs/neg/sext
  // modifiers attached.
  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }

  // Accepts a register of class RCID, or any immediate (inlinable or
  // literal) of the given type.
  bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
    return isRegClass(RCID) || isInlinableImm(type) || isLiteralImm(type);
  }

  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  // True for any vector-register class of the sizes listed here.
  bool isVReg() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
           isRegClass(AMDGPU::VReg_64RegClassID) ||
           isRegClass(AMDGPU::VReg_96RegClassID) ||
           isRegClass(AMDGPU::VReg_128RegClassID) ||
           isRegClass(AMDGPU::VReg_256RegClassID) ||
           isRegClass(AMDGPU::VReg_512RegClassID);
  }

  bool isVReg32() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  bool isVReg32OrOff() const {
    return isOff() || isVReg32();
  }

  // SDWA operand classification; defined out of line.
  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  // An immediate tagged with the given role.
  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  // Any immediate with a non-default role (i.e. a modifier, not a value).
  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000297
  // One predicate per ImmTy tag; some also range-check the value.
  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isDim() const { return isImmTy(ImmTyDim); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  // Flat-scratch style offsets: unsigned 12-bit / signed 13-bit.
  bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
  bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isDLC() const { return isImmTy(ImmTyDLC); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  // Defined out of line; needs register info from the parser.
  bool isRegClass(unsigned RCID) const;

  bool isInlineValue() const;

  // Register of class RCID or an inlinable constant, with no modifiers.
  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }
358
  // Operand-class predicates matching the *Src* operand definitions in the
  // AMDGPU target descriptions:
  //   SCSrc* — scalar register or inlinable constant;
  //   SSrc*  — SCSrc* plus literal constants (and expressions for B32/F32);
  //   VCSrc* — VALU register-or-inline-constant;
  //   VSrc*  — VCSrc* plus literal constants.
  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }

  bool isSSrcOrLdsB32() const {
    return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
           isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  bool isVSrcV2B16() const {
    return isVSrcB16() || isLiteralImm(MVT::v2i16);
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  bool isVSrcV2F16() const {
    return isVSrcF16() || isLiteralImm(MVT::v2f16);
  }

  // KImm: literal constant carried in the instruction's immediate field.
  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }
503
  // AMDGPU assembly has no memory-style operands; addressing is expressed
  // through register/immediate operands instead.
  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  // SOPP branch targets may be a resolved immediate or a symbolic expression.
  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  // Predicates with non-trivial, subtarget-dependent logic; defined out of
  // line.
  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
  bool isEndpgm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000528
  // Name of the referenced symbol, used when an expression stands in for a
  // token (see isToken()).
  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }

  StringRef getToken() const {
    assert(isToken());

    // isToken() also accepts symbol-reference expressions; surface the
    // symbol name as the token text in that case.
    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    assert(isRegKind());
    return Reg.RegNo;
  }
558
  // Source locations for diagnostics.
  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }
570
  // Modifiers live in whichever union member is active; only registers and
  // plain (ImmTyNone) immediates may carry them.
  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }

  // Defined out of line: folds FP modifiers into a literal's bit pattern.
  uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;

  // Append this operand to Inst as operand N; defined out of line.
  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;

  // KImm (in-instruction literal) emission for 16- and 32-bit widths.
  template <unsigned Bitwidth>
  void addKImmFPOperands(MCInst &Inst, unsigned N) const;

  void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<16>(Inst, N);
  }

  void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<32>(Inst, N);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000614
615 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
616 if (isRegKind())
617 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000618 else if (isExpr())
619 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000620 else
621 addImmOperands(Inst, N);
622 }
623
  // Emit the modifier-encoding immediate first, then the value operand —
  // matching the (src_modifiers, src) operand pair order of VOP encodings.
  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      // Modifiers were already emitted above, so do not re-apply them here.
      addImmOperands(Inst, N, false);
    }
  }

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  // Register-only variant of the above.
  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    assert(isRegKind());
    addRegOperands(Inst, N);
  }

  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
660
Sam Kolton945231a2016-06-10 09:57:59 +0000661 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
662 if (isImm())
663 addImmOperands(Inst, N);
664 else {
665 assert(isExpr());
666 Inst.addOperand(MCOperand::createExpr(Expr));
667 }
668 }
669
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000670 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000671 switch (Type) {
672 case ImmTyNone: OS << "None"; break;
673 case ImmTyGDS: OS << "GDS"; break;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000674 case ImmTyLDS: OS << "LDS"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000675 case ImmTyOffen: OS << "Offen"; break;
676 case ImmTyIdxen: OS << "Idxen"; break;
677 case ImmTyAddr64: OS << "Addr64"; break;
678 case ImmTyOffset: OS << "Offset"; break;
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000679 case ImmTyInstOffset: OS << "InstOffset"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000680 case ImmTyOffset0: OS << "Offset0"; break;
681 case ImmTyOffset1: OS << "Offset1"; break;
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +0000682 case ImmTyDLC: OS << "DLC"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000683 case ImmTyGLC: OS << "GLC"; break;
684 case ImmTySLC: OS << "SLC"; break;
685 case ImmTyTFE: OS << "TFE"; break;
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000686 case ImmTyD16: OS << "D16"; break;
Tim Renouf35484c92018-08-21 11:06:05 +0000687 case ImmTyFORMAT: OS << "FORMAT"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000688 case ImmTyClampSI: OS << "ClampSI"; break;
689 case ImmTyOModSI: OS << "OModSI"; break;
690 case ImmTyDppCtrl: OS << "DppCtrl"; break;
691 case ImmTyDppRowMask: OS << "DppRowMask"; break;
692 case ImmTyDppBankMask: OS << "DppBankMask"; break;
693 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000694 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
695 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
696 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000697 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
698 case ImmTyDMask: OS << "DMask"; break;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +0000699 case ImmTyDim: OS << "Dim"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000700 case ImmTyUNorm: OS << "UNorm"; break;
701 case ImmTyDA: OS << "DA"; break;
Ryan Taylor1f334d02018-08-28 15:07:30 +0000702 case ImmTyR128A16: OS << "R128A16"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000703 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000704 case ImmTyOff: OS << "Off"; break;
705 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000706 case ImmTyExpCompr: OS << "ExpCompr"; break;
707 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000708 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000709 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000710 case ImmTyInterpSlot: OS << "InterpSlot"; break;
711 case ImmTyInterpAttr: OS << "InterpAttr"; break;
712 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000713 case ImmTyOpSel: OS << "OpSel"; break;
714 case ImmTyOpSelHi: OS << "OpSelHi"; break;
715 case ImmTyNegLo: OS << "NegLo"; break;
716 case ImmTyNegHi: OS << "NegHi"; break;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000717 case ImmTySwizzle: OS << "Swizzle"; break;
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +0000718 case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000719 case ImmTyHigh: OS << "High"; break;
David Stuttard20ea21c2019-03-12 09:52:58 +0000720 case ImmTyEndpgm:
721 OS << "Endpgm";
722 break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000723 }
724 }
725
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000726 void print(raw_ostream &OS) const override {
727 switch (Kind) {
728 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000729 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000730 break;
731 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000732 OS << '<' << getImm();
733 if (getImmTy() != ImmTyNone) {
734 OS << " type: "; printImmTy(OS, getImmTy());
735 }
Sam Kolton945231a2016-06-10 09:57:59 +0000736 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000737 break;
738 case Token:
739 OS << '\'' << getToken() << '\'';
740 break;
741 case Expression:
742 OS << "<expr " << *Expr << '>';
743 break;
744 }
745 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000746
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000747 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
748 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000749 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000750 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000751 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000752 Op->Imm.Val = Val;
753 Op->Imm.IsFPImm = IsFPImm;
754 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000755 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000756 Op->StartLoc = Loc;
757 Op->EndLoc = Loc;
758 return Op;
759 }
760
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000761 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
762 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000763 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000764 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000765 Res->Tok.Data = Str.data();
766 Res->Tok.Length = Str.size();
767 Res->StartLoc = Loc;
768 Res->EndLoc = Loc;
769 return Res;
770 }
771
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000772 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
773 unsigned RegNo, SMLoc S,
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +0000774 SMLoc E) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000775 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000776 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000777 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000778 Op->StartLoc = S;
779 Op->EndLoc = E;
780 return Op;
781 }
782
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000783 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
784 const class MCExpr *Expr, SMLoc S) {
785 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000786 Op->Expr = Expr;
787 Op->StartLoc = S;
788 Op->EndLoc = S;
789 return Op;
790 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000791};
792
Sam Kolton945231a2016-06-10 09:57:59 +0000793raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
794 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
795 return OS;
796}
797
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000798//===----------------------------------------------------------------------===//
799// AsmParser
800//===----------------------------------------------------------------------===//
801
Artem Tamazova01cce82016-12-27 16:00:11 +0000802// Holds info related to the current kernel, e.g. count of SGPRs used.
803// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
804// .amdgpu_hsa_kernel or at EOF.
805class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000806 int SgprIndexUnusedMin = -1;
807 int VgprIndexUnusedMin = -1;
808 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000809
810 void usesSgprAt(int i) {
811 if (i >= SgprIndexUnusedMin) {
812 SgprIndexUnusedMin = ++i;
813 if (Ctx) {
814 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
815 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
816 }
817 }
818 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000819
Artem Tamazova01cce82016-12-27 16:00:11 +0000820 void usesVgprAt(int i) {
821 if (i >= VgprIndexUnusedMin) {
822 VgprIndexUnusedMin = ++i;
823 if (Ctx) {
824 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
825 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
826 }
827 }
828 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000829
Artem Tamazova01cce82016-12-27 16:00:11 +0000830public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000831 KernelScopeInfo() = default;
832
Artem Tamazova01cce82016-12-27 16:00:11 +0000833 void initialize(MCContext &Context) {
834 Ctx = &Context;
835 usesSgprAt(SgprIndexUnusedMin = -1);
836 usesVgprAt(VgprIndexUnusedMin = -1);
837 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000838
Artem Tamazova01cce82016-12-27 16:00:11 +0000839 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
840 switch (RegKind) {
841 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
842 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
843 default: break;
844 }
845 }
846};
847
Tom Stellard45bb48e2015-06-13 03:28:10 +0000848class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000849 MCAsmParser &Parser;
850
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +0000851 // Number of extra operands parsed after the first optional operand.
852 // This may be necessary to skip hardcoded mandatory operands.
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000853 static const unsigned MAX_OPR_LOOKAHEAD = 8;
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +0000854
Eugene Zelenko66203762017-01-21 00:53:49 +0000855 unsigned ForcedEncodingSize = 0;
856 bool ForcedDPP = false;
857 bool ForcedSDWA = false;
Artem Tamazova01cce82016-12-27 16:00:11 +0000858 KernelScopeInfo KernelScope;
Matt Arsenault68802d32015-11-05 03:11:27 +0000859
Tom Stellard45bb48e2015-06-13 03:28:10 +0000860 /// @name Auto-generated Match Functions
861 /// {
862
863#define GET_ASSEMBLER_HEADER
864#include "AMDGPUGenAsmMatcher.inc"
865
866 /// }
867
Tom Stellard347ac792015-06-26 21:15:07 +0000868private:
Artem Tamazov25478d82016-12-29 15:41:52 +0000869 bool ParseAsAbsoluteExpression(uint32_t &Ret);
Scott Linder1e8c2c72018-06-21 19:38:56 +0000870 bool OutOfRangeError(SMRange Range);
871 /// Calculate VGPR/SGPR blocks required for given target, reserved
872 /// registers, and user-specified NextFreeXGPR values.
873 ///
874 /// \param Features [in] Target features, used for bug corrections.
875 /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
876 /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
877 /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
878 /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
879 /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
880 /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
881 /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
882 /// \param VGPRBlocks [out] Result VGPR block count.
883 /// \param SGPRBlocks [out] Result SGPR block count.
884 bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
885 bool FlatScrUsed, bool XNACKUsed,
886 unsigned NextFreeVGPR, SMRange VGPRRange,
887 unsigned NextFreeSGPR, SMRange SGPRRange,
888 unsigned &VGPRBlocks, unsigned &SGPRBlocks);
889 bool ParseDirectiveAMDGCNTarget();
890 bool ParseDirectiveAMDHSAKernel();
Tom Stellard347ac792015-06-26 21:15:07 +0000891 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
892 bool ParseDirectiveHSACodeObjectVersion();
893 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000894 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
895 bool ParseDirectiveAMDKernelCodeT();
Matt Arsenault68802d32015-11-05 03:11:27 +0000896 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000897 bool ParseDirectiveAMDGPUHsaKernel();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +0000898
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +0000899 bool ParseDirectiveISAVersion();
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +0000900 bool ParseDirectiveHSAMetadata();
Tim Renoufe7bd52f2019-03-20 18:47:21 +0000901 bool ParseDirectivePALMetadataBegin();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +0000902 bool ParseDirectivePALMetadata();
903
Tim Renoufe7bd52f2019-03-20 18:47:21 +0000904 /// Common code to parse out a block of text (typically YAML) between start and
905 /// end directives.
906 bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
907 const char *AssemblerDirectiveEnd,
908 std::string &CollectString);
909
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000910 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
911 RegisterKind RegKind, unsigned Reg1,
912 unsigned RegNum);
913 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
914 unsigned& RegNum, unsigned& RegWidth,
915 unsigned *DwordRegIndex);
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +0000916 bool isRegister();
917 bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
Scott Linder1e8c2c72018-06-21 19:38:56 +0000918 Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
919 void initializeGprCountSymbol(RegisterKind RegKind);
920 bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
921 unsigned RegWidth);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000922 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +0000923 bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000924 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
925 bool IsGdsHardcoded);
Tom Stellard347ac792015-06-26 21:15:07 +0000926
Tom Stellard45bb48e2015-06-13 03:28:10 +0000927public:
  // Target-specific match outcomes; numbering starts after the generic
  // match-result values.
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };
  // Operand-parsing mode. NSA is presumably the MIMG non-sequential-address
  // form (cf. validateMIMGAddrSize) -- confirm with the MIMG handling code.
  enum OperandMode {
    OperandMode_Default,
    OperandMode_NSA,
  };
Tom Stellard88e0b252015-10-06 15:57:53 +0000935
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +0000936 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000937
  // Constructor: applies a default feature set when none was given and
  // publishes pre-defined MC symbols describing the selected ISA version.
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("southern-islands");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
      MCContext &Ctx = getContext();
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        // Code-object-v3 flavored version symbols.
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      } else {
        // Legacy version symbol names.
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      // GPR usage is tracked via symbols under code-object-v3, otherwise via
      // the KernelScopeInfo helper above.
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }
982
  // Subtarget feature queries, forwarded to the AMDGPUBaseInfo helpers.
  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }
994
  // GPU-generation predicates, forwarded to the AMDGPUBaseInfo helpers.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool isGFX10() const {
    return AMDGPU::isGFX10(getSTI());
  }
1014
  // Feature-bit queries used while parsing/validating operands.
  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  // SGPR102/103 are usable on all generations except VI and GFX9.
  bool hasSGPR102_SGPR103() const {
    return !isVI() && !isGFX9();
  }

  // SGPR104/105 are only usable on GFX10.
  bool hasSGPR104_SGPR105() const {
    return isGFX10();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }
1034
  // Returns the streamer downcast to the AMDGPU-specific subclass.
  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }
1053
  // Record encoding constraints derived from the mnemonic
  // (see parseMnemonicSuffix); the matcher consults them later.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  // A forced 64-bit encoding size selects the VOP3 form.
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001063
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001064 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +00001065 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
1066 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +00001067 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
1068 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001069 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1070 OperandVector &Operands, MCStreamer &Out,
1071 uint64_t &ErrorInfo,
1072 bool MatchingInlineAsm) override;
1073 bool ParseDirective(AsmToken DirectiveID) override;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001074 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic,
1075 OperandMode Mode = OperandMode_Default);
Sam Kolton05ef1c92016-06-03 10:27:37 +00001076 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001077 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
1078 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001079 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001080
Sam Kolton11de3702016-05-24 12:38:33 +00001081 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001082
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001083 OperandMatchResultTy
1084 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001085 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001086 bool (*ConvertResult)(int64_t &) = nullptr);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001087
1088 OperandMatchResultTy parseOperandArrayWithPrefix(
1089 const char *Prefix,
1090 OperandVector &Operands,
1091 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
1092 bool (*ConvertResult)(int64_t&) = nullptr);
1093
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001094 OperandMatchResultTy
1095 parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001096 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001097 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
1098 StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001099
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00001100 bool isModifier();
1101 bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1102 bool isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1103 bool isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1104 bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const;
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00001105 bool parseSP3NegModifier();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00001106 OperandMatchResultTy parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001107 OperandMatchResultTy parseReg(OperandVector &Operands);
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00001108 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001109 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
1110 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
1111 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
1112 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001113 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
Tim Renouf35484c92018-08-21 11:06:05 +00001114 OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +00001115
Tom Stellard45bb48e2015-06-13 03:28:10 +00001116 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  // DS converters: thin wrappers over cvtDSImpl selecting IsGdsHardcoded.
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001119 void cvtExp(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001120
1121 bool parseCnt(int64_t &IntVal);
1122 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001123 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001124
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001125private:
  // Result of parsing one sub-operand of hwreg()/sendmsg(): the numeric id
  // plus whether it was given by symbolic name rather than as a number.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };
Sam Kolton11de3702016-05-24 12:38:33 +00001132
Artem Tamazov6edc1352016-05-26 17:00:33 +00001133 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
1134 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001135
1136 void errorExpTgt();
1137 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
1138
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00001139 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00001140 bool validateSOPLiteral(const MCInst &Inst) const;
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00001141 bool validateConstantBusLimitations(const MCInst &Inst);
1142 bool validateEarlyClobberLimitations(const MCInst &Inst);
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00001143 bool validateIntClampSupported(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001144 bool validateMIMGAtomicDMask(const MCInst &Inst);
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00001145 bool validateMIMGGatherDMask(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001146 bool validateMIMGDataSize(const MCInst &Inst);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001147 bool validateMIMGAddrSize(const MCInst &Inst);
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00001148 bool validateMIMGD16(const MCInst &Inst);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001149 bool validateMIMGDim(const MCInst &Inst);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00001150 bool validateLdsDirect(const MCInst &Inst);
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00001151 bool validateVOP3Literal(const MCInst &Inst) const;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001152 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1153 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1154 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001155
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001156 bool isId(const StringRef Id) const;
1157 bool isId(const AsmToken &Token, const StringRef Id) const;
1158 bool isToken(const AsmToken::TokenKind Kind) const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001159 bool trySkipId(const StringRef Id);
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00001160 bool trySkipId(const StringRef Id, const AsmToken::TokenKind Kind);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001161 bool trySkipToken(const AsmToken::TokenKind Kind);
1162 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1163 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00001164 void peekTokens(MutableArrayRef<AsmToken> Tokens);
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001165 AsmToken::TokenKind getTokenKind() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001166 bool parseExpr(int64_t &Imm);
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00001167 StringRef getTokenStr() const;
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001168 AsmToken peekToken();
1169 AsmToken getToken() const;
1170 SMLoc getLoc() const;
1171 void lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001172
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001173public:
Sam Kolton11de3702016-05-24 12:38:33 +00001174 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00001175 OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001176
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001177 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001178 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Matt Arsenault0e8a2992016-12-15 20:40:20 +00001179 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1180 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001181 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
1182
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001183 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1184 const unsigned MinVal,
1185 const unsigned MaxVal,
1186 const StringRef ErrMsg);
1187 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1188 bool parseSwizzleOffset(int64_t &Imm);
1189 bool parseSwizzleMacro(int64_t &Imm);
1190 bool parseSwizzleQuadPerm(int64_t &Imm);
1191 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1192 bool parseSwizzleBroadcast(int64_t &Imm);
1193 bool parseSwizzleSwap(int64_t &Imm);
1194 bool parseSwizzleReverse(int64_t &Imm);
1195
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00001196 OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
1197 int64_t parseGPRIdxMacro();
1198
  // MUBUF converters: thin wrappers over cvtMubufImpl selecting the
  // (IsAtomic, IsAtomicReturn[, IsLds]) flag combination.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
David Stuttard70e8bc12017-06-22 16:29:22 +00001203 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1204
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001205 AMDGPUOperand::Ptr defaultDLC() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001206 AMDGPUOperand::Ptr defaultGLC() const;
1207 AMDGPUOperand::Ptr defaultSLC() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001208
Artem Tamazov54bfd542016-10-31 16:07:39 +00001209 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1210 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001211 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenaultfd023142017-06-12 15:55:58 +00001212 AMDGPUOperand::Ptr defaultOffsetU12() const;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00001213 AMDGPUOperand::Ptr defaultOffsetS13() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001214
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001215 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1216
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001217 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1218 OptionalImmIndexMap &OptionalIdx);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00001219 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001220 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001221 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001222
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00001223 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1224
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001225 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1226 bool IsAtomic = false);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001227 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +00001228
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001229 OperandMatchResultTy parseDim(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001230 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +00001231 AMDGPUOperand::Ptr defaultRowMask() const;
1232 AMDGPUOperand::Ptr defaultBankMask() const;
1233 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1234 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001235
Sam Kolton05ef1c92016-06-03 10:27:37 +00001236 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1237 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001238 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +00001239 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1240 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Koltonf7659d712017-05-23 10:08:55 +00001241 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +00001242 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1243 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Koltonf7659d712017-05-23 10:08:55 +00001244 uint64_t BasicInstType, bool skipVcc = false);
David Stuttard20ea21c2019-03-12 09:52:58 +00001245
1246 OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
1247 AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001248};
1249
// Describes an optional keyword operand (e.g. "offset", "glc") that may
// appear in the assembly text after the mandatory operands.
struct OptionalOperand {
  const char *Name;          // Keyword as written in assembly.
  AMDGPUOperand::ImmTy Type; // Immediate kind the keyword maps to.
  bool IsBit;                // True if this is a single-bit flag.
  // Optional hook to validate/adjust the parsed value in place;
  // returns false to reject the value.
  bool (*ConvertResult)(int64_t&);
};
1256
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001257} // end anonymous namespace
1258
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001259// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001260static const fltSemantics *getFltSemantics(unsigned Size) {
1261 switch (Size) {
1262 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001263 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001264 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001265 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001266 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001267 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001268 default:
1269 llvm_unreachable("unsupported fp type");
1270 }
1271}
1272
Matt Arsenault4bd72362016-12-10 00:39:12 +00001273static const fltSemantics *getFltSemantics(MVT VT) {
1274 return getFltSemantics(VT.getSizeInBits() / 8);
1275}
1276
// Map an MCOperandInfo operand type to the IEEE semantics used when an FP
// literal is encoded into that operand.
static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  // 32-bit operands and 32-bit inline constants.
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    return &APFloat::IEEEsingle();
  // 64-bit operands and 64-bit inline constants.
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return &APFloat::IEEEdouble();
  // 16-bit scalar and packed-16-bit operands; a literal fills one half.
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
1302
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001303//===----------------------------------------------------------------------===//
1304// Operand
1305//===----------------------------------------------------------------------===//
1306
// Return true if FPLiteral can be converted to the FP format of VT without
// overflow or underflow. Despite the name, precision loss alone is allowed.
// Note: FPLiteral is converted in place as a side effect.
static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool Lost;

  // Convert literal to the operand's FP semantics.
  APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
                                               APFloat::rmNearestTiesToEven,
                                               &Lost);
  // We allow precision lost but not overflow or underflow
  if (Status != APFloat::opOK &&
      Lost &&
      ((Status & APFloat::opOverflow)  != 0 ||
       (Status & APFloat::opUnderflow) != 0)) {
    return false;
  }

  return true;
}
1324
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001325static bool isSafeTruncation(int64_t Val, unsigned Size) {
1326 return isUIntN(Size, Val) || isIntN(Size, Val);
1327}
1328
// Return true if this operand can be encoded as an inline constant for an
// operand of machine value type 'type' (no extra literal dword needed).
bool AMDGPUOperand::isInlinableImm(MVT type) const {

  // This is a hack to enable named inline values like
  // shared_base with both 32-bit and 64-bit operands.
  // Note that these values are defined as
  // 32-bit operands only.
  if (isInlineValue()) {
    return true;
  }

  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // For narrower operands, the fp literal must first fit the operand's
    // FP format (precision loss allowed, overflow/underflow not).
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // Reject values that do not fit the operand width as signed or unsigned.
  if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) {
    return false;
  }

  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1391
// Return true if this operand can be encoded as a (non-inline) literal
// constant for an operand of machine value type 'type'.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    // 64-bit literals are encoded in 32 bits (see FIXME below), so check
    // against a 32-bit width for them.
    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isSafeTruncation(Imm.Val, Size);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept such literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // We allow fp literals with f16x2 operands assuming that the specified
  // literal goes into the lower half and the upper half is zero. We also
  // require that the literal may be losslessly converted to f16.
  MVT ExpectedType = (type == MVT::v2f16)? MVT::f16 :
                     (type == MVT::v2i16)? MVT::i16 : type;

  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, ExpectedType);
}
1438
1439bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001440 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001441}
1442
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001443bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001444 if (AsmParser->isVI())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001445 return isVReg32();
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +00001446 else if (AsmParser->isGFX9() || AsmParser->isGFX10())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001447 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001448 else
1449 return false;
1450}
1451
// Convenience wrappers checking SDWA operand legality for each element
// type that SDWA instructions can take.
bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}

bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}

bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}

bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}
1467
// Apply the parsed abs/neg source modifiers to the raw bit pattern of an
// FP immediate of the given byte Size by clearing/flipping its sign bit.
uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
{
  assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  assert(Size == 2 || Size == 4 || Size == 8);

  // Position of the IEEE sign bit for a Size-byte float.
  const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));

  if (Imm.Mods.Abs) {
    // |x|: force the sign bit to zero.
    Val &= ~FpSignMask;
  }
  if (Imm.Mods.Neg) {
    // -x: flip the sign bit.
    Val ^= FpSignMask;
  }

  return Val;
}
1484
1485void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001486 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1487 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001488 addLiteralImmOperand(Inst, Imm.Val,
1489 ApplyModifiers &
1490 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001491 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001492 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1493 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001494 }
1495}
1496
// Encode Val as the literal for the next operand of Inst, applying FP
// source modifiers if requested. Inline-constant-eligible values are
// emitted as-is; other values are truncated/converted to the encoding the
// operand type requires.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // FP tokens are stored as a 64-bit double pattern regardless of the
    // operand size; int tokens use the operand's own size.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of a 64-bit fp literal are encoded.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    case AMDGPU::OPERAND_REG_IMM_V2INT16:
    case AMDGPU::OPERAND_REG_IMM_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to the operand's FP semantics (f32 or f16).
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    if (isSafeTruncation(Val, 32) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Non-inlinable 64-bit int literals keep only their low 32 bits.
    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    if (isSafeTruncation(Val, 16) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    // Packed inline-constant operands must already hold an inlinable
    // 16-bit value; this should have been validated by the predicates.
    assert(isSafeTruncation(Val, 16));
    assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                        AsmParser->hasInv2PiInlineImm()));

    Inst.addOperand(MCOperand::createImm(Val));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1631
// Encode a KImm (fixed-width literal) operand. Integer tokens are
// truncated to Bitwidth bits; FP tokens are rounded to the Bitwidth-sized
// IEEE format and their raw bit pattern is emitted.
template <unsigned Bitwidth>
void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
  APInt Literal(64, Imm.Val);

  if (!Imm.IsFPImm) {
    // We got int literal token.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
    return;
  }

  bool Lost;
  APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
  FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
                    APFloat::rmNearestTiesToEven, &Lost);
  Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
}
1648
1649void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1650 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1651}
1652
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001653static bool isInlineValue(unsigned Reg) {
1654 switch (Reg) {
1655 case AMDGPU::SRC_SHARED_BASE:
1656 case AMDGPU::SRC_SHARED_LIMIT:
1657 case AMDGPU::SRC_PRIVATE_BASE:
1658 case AMDGPU::SRC_PRIVATE_LIMIT:
1659 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
1660 return true;
1661 default:
1662 return false;
1663 }
1664}
1665
1666bool AMDGPUOperand::isInlineValue() const {
1667 return isRegKind() && ::isInlineValue(getReg());
1668}
1669
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001670//===----------------------------------------------------------------------===//
1671// AsmParser
1672//===----------------------------------------------------------------------===//
1673
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001674static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1675 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001676 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001677 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001678 case 1: return AMDGPU::VGPR_32RegClassID;
1679 case 2: return AMDGPU::VReg_64RegClassID;
1680 case 3: return AMDGPU::VReg_96RegClassID;
1681 case 4: return AMDGPU::VReg_128RegClassID;
1682 case 8: return AMDGPU::VReg_256RegClassID;
1683 case 16: return AMDGPU::VReg_512RegClassID;
1684 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001685 } else if (Is == IS_TTMP) {
1686 switch (RegWidth) {
1687 default: return -1;
1688 case 1: return AMDGPU::TTMP_32RegClassID;
1689 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001690 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001691 case 8: return AMDGPU::TTMP_256RegClassID;
1692 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001693 }
1694 } else if (Is == IS_SGPR) {
1695 switch (RegWidth) {
1696 default: return -1;
1697 case 1: return AMDGPU::SGPR_32RegClassID;
1698 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001699 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001700 case 8: return AMDGPU::SGPR_256RegClassID;
1701 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001702 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001703 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001704 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001705}
1706
// Map a special (non-numbered) register name to its register enum value;
// returns 0 when the name is not a special register.
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("xnack_mask", AMDGPU::XNACK_MASK)
    .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("lds_direct", AMDGPU::LDS_DIRECT)
    .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
    .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Case("null", AMDGPU::SGPR_NULL)
    .Default(0);
}
1744
Eugene Zelenko66203762017-01-21 00:53:49 +00001745bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1746 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001747 auto R = parseRegister();
1748 if (!R) return true;
1749 assert(R->isReg());
1750 RegNo = R->getReg();
1751 StartLoc = R->getStartLoc();
1752 EndLoc = R->getEndLoc();
1753 return false;
1754}
1755
Eugene Zelenko66203762017-01-21 00:53:49 +00001756bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1757 RegisterKind RegKind, unsigned Reg1,
1758 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001759 switch (RegKind) {
1760 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001761 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1762 Reg = AMDGPU::EXEC;
1763 RegWidth = 2;
1764 return true;
1765 }
1766 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1767 Reg = AMDGPU::FLAT_SCR;
1768 RegWidth = 2;
1769 return true;
1770 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001771 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1772 Reg = AMDGPU::XNACK_MASK;
1773 RegWidth = 2;
1774 return true;
1775 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001776 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1777 Reg = AMDGPU::VCC;
1778 RegWidth = 2;
1779 return true;
1780 }
1781 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1782 Reg = AMDGPU::TBA;
1783 RegWidth = 2;
1784 return true;
1785 }
1786 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1787 Reg = AMDGPU::TMA;
1788 RegWidth = 2;
1789 return true;
1790 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001791 return false;
1792 case IS_VGPR:
1793 case IS_SGPR:
1794 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001795 if (Reg1 != Reg + RegWidth) {
1796 return false;
1797 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001798 RegWidth++;
1799 return true;
1800 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001801 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001802 }
1803}
1804
// Prefixes of the numbered register families recognized by isRegister().
static const StringRef Registers[] = {
  { "v" },
  { "s" },
  { "ttmp" },
};
1810
1811bool
1812AMDGPUAsmParser::isRegister(const AsmToken &Token,
1813 const AsmToken &NextToken) const {
1814
1815 // A list of consecutive registers: [s0,s1,s2,s3]
1816 if (Token.is(AsmToken::LBrac))
1817 return true;
1818
1819 if (!Token.is(AsmToken::Identifier))
1820 return false;
1821
1822 // A single register like s0 or a range of registers like s[0:1]
1823
1824 StringRef RegName = Token.getString();
1825
1826 for (StringRef Reg : Registers) {
1827 if (RegName.startswith(Reg)) {
1828 if (Reg.size() < RegName.size()) {
1829 unsigned RegNum;
1830 // A single register with an index: rXX
1831 if (!RegName.substr(Reg.size()).getAsInteger(10, RegNum))
1832 return true;
1833 } else {
1834 // A range of registers: r[XX:YY].
1835 if (NextToken.is(AsmToken::LBrac))
1836 return true;
1837 }
1838 }
1839 }
1840
1841 return getSpecialRegForName(RegName);
1842}
1843
1844bool
1845AMDGPUAsmParser::isRegister()
1846{
1847 return isRegister(getToken(), peekToken());
1848}
1849
Eugene Zelenko66203762017-01-21 00:53:49 +00001850bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1851 unsigned &RegNum, unsigned &RegWidth,
1852 unsigned *DwordRegIndex) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001853 if (DwordRegIndex) { *DwordRegIndex = 0; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001854 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1855 if (getLexer().is(AsmToken::Identifier)) {
1856 StringRef RegName = Parser.getTok().getString();
1857 if ((Reg = getSpecialRegForName(RegName))) {
1858 Parser.Lex();
1859 RegKind = IS_SPECIAL;
1860 } else {
1861 unsigned RegNumIndex = 0;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001862 if (RegName[0] == 'v') {
1863 RegNumIndex = 1;
1864 RegKind = IS_VGPR;
1865 } else if (RegName[0] == 's') {
1866 RegNumIndex = 1;
1867 RegKind = IS_SGPR;
1868 } else if (RegName.startswith("ttmp")) {
1869 RegNumIndex = strlen("ttmp");
1870 RegKind = IS_TTMP;
1871 } else {
1872 return false;
1873 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001874 if (RegName.size() > RegNumIndex) {
1875 // Single 32-bit register: vXX.
Artem Tamazovf88397c2016-06-03 14:41:17 +00001876 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1877 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001878 Parser.Lex();
1879 RegWidth = 1;
1880 } else {
Artem Tamazov7da9b822016-05-27 12:50:13 +00001881 // Range of registers: v[XX:YY]. ":YY" is optional.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001882 Parser.Lex();
1883 int64_t RegLo, RegHi;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001884 if (getLexer().isNot(AsmToken::LBrac))
1885 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001886 Parser.Lex();
1887
Artem Tamazovf88397c2016-06-03 14:41:17 +00001888 if (getParser().parseAbsoluteExpression(RegLo))
1889 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001890
Artem Tamazov7da9b822016-05-27 12:50:13 +00001891 const bool isRBrace = getLexer().is(AsmToken::RBrac);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001892 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1893 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001894 Parser.Lex();
1895
Artem Tamazov7da9b822016-05-27 12:50:13 +00001896 if (isRBrace) {
1897 RegHi = RegLo;
1898 } else {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001899 if (getParser().parseAbsoluteExpression(RegHi))
1900 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001901
Artem Tamazovf88397c2016-06-03 14:41:17 +00001902 if (getLexer().isNot(AsmToken::RBrac))
1903 return false;
Artem Tamazov7da9b822016-05-27 12:50:13 +00001904 Parser.Lex();
1905 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001906 RegNum = (unsigned) RegLo;
1907 RegWidth = (RegHi - RegLo) + 1;
1908 }
1909 }
1910 } else if (getLexer().is(AsmToken::LBrac)) {
1911 // List of consecutive registers: [s0,s1,s2,s3]
1912 Parser.Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00001913 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
Artem Tamazovf88397c2016-06-03 14:41:17 +00001914 return false;
1915 if (RegWidth != 1)
1916 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001917 RegisterKind RegKind1;
1918 unsigned Reg1, RegNum1, RegWidth1;
1919 do {
1920 if (getLexer().is(AsmToken::Comma)) {
1921 Parser.Lex();
1922 } else if (getLexer().is(AsmToken::RBrac)) {
1923 Parser.Lex();
1924 break;
Artem Tamazova01cce82016-12-27 16:00:11 +00001925 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001926 if (RegWidth1 != 1) {
1927 return false;
1928 }
1929 if (RegKind1 != RegKind) {
1930 return false;
1931 }
1932 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1933 return false;
1934 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001935 } else {
1936 return false;
1937 }
1938 } while (true);
1939 } else {
1940 return false;
1941 }
1942 switch (RegKind) {
1943 case IS_SPECIAL:
1944 RegNum = 0;
1945 RegWidth = 1;
1946 break;
1947 case IS_VGPR:
1948 case IS_SGPR:
1949 case IS_TTMP:
1950 {
1951 unsigned Size = 1;
1952 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001953 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001954 Size = std::min(RegWidth, 4u);
1955 }
Artem Tamazovf88397c2016-06-03 14:41:17 +00001956 if (RegNum % Size != 0)
1957 return false;
Artem Tamazova01cce82016-12-27 16:00:11 +00001958 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001959 RegNum = RegNum / Size;
1960 int RCID = getRegClass(RegKind, RegWidth);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001961 if (RCID == -1)
1962 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001963 const MCRegisterClass RC = TRI->getRegClass(RCID);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001964 if (RegNum >= RC.getNumRegs())
1965 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001966 Reg = RC.getRegister(RegNum);
1967 break;
1968 }
1969
1970 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001971 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001972 }
1973
Artem Tamazovf88397c2016-06-03 14:41:17 +00001974 if (!subtargetHasRegister(*TRI, Reg))
1975 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001976 return true;
1977}
1978
Scott Linder1e8c2c72018-06-21 19:38:56 +00001979Optional<StringRef>
1980AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1981 switch (RegKind) {
1982 case IS_VGPR:
1983 return StringRef(".amdgcn.next_free_vgpr");
1984 case IS_SGPR:
1985 return StringRef(".amdgcn.next_free_sgpr");
1986 default:
1987 return None;
1988 }
1989}
1990
1991void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1992 auto SymbolName = getGprCountSymbolName(RegKind);
1993 assert(SymbolName && "initializing invalid register kind");
1994 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1995 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1996}
1997
1998bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
1999 unsigned DwordRegIndex,
2000 unsigned RegWidth) {
2001 // Symbols are only defined for GCN targets
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00002002 if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
Scott Linder1e8c2c72018-06-21 19:38:56 +00002003 return true;
2004
2005 auto SymbolName = getGprCountSymbolName(RegKind);
2006 if (!SymbolName)
2007 return true;
2008 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
2009
2010 int64_t NewMax = DwordRegIndex + RegWidth - 1;
2011 int64_t OldCount;
2012
2013 if (!Sym->isVariable())
2014 return !Error(getParser().getTok().getLoc(),
2015 ".amdgcn.next_free_{v,s}gpr symbols must be variable");
2016 if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
2017 return !Error(
2018 getParser().getTok().getLoc(),
2019 ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");
2020
2021 if (OldCount <= NewMax)
2022 Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));
2023
2024 return true;
2025}
2026
Valery Pykhtin0f97f172016-03-14 07:43:42 +00002027std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002028 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00002029 SMLoc StartLoc = Tok.getLoc();
2030 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002031 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00002032 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002033
Artem Tamazova01cce82016-12-27 16:00:11 +00002034 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002035 //FIXME: improve error messages (bug 41303).
2036 Error(StartLoc, "not a valid operand.");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002037 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002038 }
Scott Linder1e8c2c72018-06-21 19:38:56 +00002039 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
2040 if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
2041 return nullptr;
2042 } else
2043 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002044 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002045}
2046
Alex Bradbury58eba092016-11-01 16:32:05 +00002047OperandMatchResultTy
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002048AMDGPUAsmParser::parseImm(OperandVector &Operands, bool HasSP3AbsModifier) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002049 // TODO: add syntactic sugar for 1/(2*PI)
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002050
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002051 assert(!isRegister());
2052 assert(!isModifier());
2053
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002054 const auto& Tok = getToken();
2055 const auto& NextTok = peekToken();
2056 bool IsReal = Tok.is(AsmToken::Real);
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002057 SMLoc S = getLoc();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002058 bool Negate = false;
2059
2060 if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
2061 lex();
2062 IsReal = true;
2063 Negate = true;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002064 }
2065
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002066 if (IsReal) {
2067 // Floating-point expressions are not supported.
2068 // Can only allow floating-point literals with an
2069 // optional sign.
2070
2071 StringRef Num = getTokenStr();
2072 lex();
2073
2074 APFloat RealVal(APFloat::IEEEdouble());
2075 auto roundMode = APFloat::rmNearestTiesToEven;
2076 if (RealVal.convertFromString(Num, roundMode) == APFloat::opInvalidOp) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00002077 return MatchOperand_ParseFail;
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002078 }
2079 if (Negate)
2080 RealVal.changeSign();
2081
2082 Operands.push_back(
2083 AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
2084 AMDGPUOperand::ImmTyNone, true));
2085
2086 return MatchOperand_Success;
2087
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002088 } else {
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002089 int64_t IntVal;
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002090 const MCExpr *Expr;
2091 SMLoc S = getLoc();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002092
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002093 if (HasSP3AbsModifier) {
2094 // This is a workaround for handling expressions
2095 // as arguments of SP3 'abs' modifier, for example:
2096 // |1.0|
2097 // |-1|
2098 // |1+x|
2099 // This syntax is not compatible with syntax of standard
2100 // MC expressions (due to the trailing '|').
2101 SMLoc EndLoc;
2102 if (getParser().parsePrimaryExpr(Expr, EndLoc))
2103 return MatchOperand_ParseFail;
2104 } else {
2105 if (Parser.parseExpression(Expr))
2106 return MatchOperand_ParseFail;
2107 }
2108
2109 if (Expr->evaluateAsAbsolute(IntVal)) {
2110 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
2111 } else {
2112 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
2113 }
2114
Sam Kolton1bdcef72016-05-23 09:59:02 +00002115 return MatchOperand_Success;
2116 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00002117
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002118 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002119}
2120
Alex Bradbury58eba092016-11-01 16:32:05 +00002121OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00002122AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002123 if (!isRegister())
2124 return MatchOperand_NoMatch;
2125
Sam Kolton1bdcef72016-05-23 09:59:02 +00002126 if (auto R = parseRegister()) {
2127 assert(R->isReg());
Sam Kolton1bdcef72016-05-23 09:59:02 +00002128 Operands.push_back(std::move(R));
2129 return MatchOperand_Success;
2130 }
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002131 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002132}
2133
Alex Bradbury58eba092016-11-01 16:32:05 +00002134OperandMatchResultTy
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002135AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002136 auto res = parseReg(Operands);
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002137 if (res != MatchOperand_NoMatch) {
2138 return res;
2139 } else if (isModifier()) {
2140 return MatchOperand_NoMatch;
2141 } else {
2142 return parseImm(Operands, HasSP3AbsMod);
2143 }
2144}
2145
2146bool
2147AMDGPUAsmParser::isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
2148 if (Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::LParen)) {
2149 const auto &str = Token.getString();
2150 return str == "abs" || str == "neg" || str == "sext";
2151 }
2152 return false;
2153}
2154
2155bool
2156AMDGPUAsmParser::isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const {
2157 return Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::Colon);
2158}
2159
2160bool
2161AMDGPUAsmParser::isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
2162 return isNamedOperandModifier(Token, NextToken) || Token.is(AsmToken::Pipe);
2163}
2164
2165bool
2166AMDGPUAsmParser::isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
2167 return isRegister(Token, NextToken) || isOperandModifier(Token, NextToken);
2168}
2169
2170// Check if this is an operand modifier or an opcode modifier
2171// which may look like an expression but it is not. We should
2172// avoid parsing these modifiers as expressions. Currently
2173// recognized sequences are:
2174// |...|
2175// abs(...)
2176// neg(...)
2177// sext(...)
2178// -reg
2179// -|...|
2180// -abs(...)
2181// name:...
2182// Note that simple opcode modifiers like 'gds' may be parsed as
2183// expressions; this is a special case. See getExpressionAsToken.
2184//
2185bool
2186AMDGPUAsmParser::isModifier() {
2187
2188 AsmToken Tok = getToken();
2189 AsmToken NextToken[2];
2190 peekTokens(NextToken);
2191
2192 return isOperandModifier(Tok, NextToken[0]) ||
2193 (Tok.is(AsmToken::Minus) && isRegOrOperandModifier(NextToken[0], NextToken[1])) ||
2194 isOpcodeModifierWithVal(Tok, NextToken[0]);
Sam Kolton9772eb32017-01-11 11:46:30 +00002195}
2196
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002197// Check if the current token is an SP3 'neg' modifier.
2198// Currently this modifier is allowed in the following context:
2199//
2200// 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]".
2201// 2. Before an 'abs' modifier: -abs(...)
2202// 3. Before an SP3 'abs' modifier: -|...|
2203//
2204// In all other cases "-" is handled as a part
2205// of an expression that follows the sign.
2206//
// Note: When "-" is followed by an integer literal,
// it is interpreted as integer negation rather than
// a floating-point NEG modifier applied to the literal.
// Besides being counter-intuitive, such use of floating-point
2211// NEG modifier would have resulted in different meaning
2212// of integer literals used with VOP1/2/C and VOP3,
2213// for example:
2214// v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
2215// v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
2216// Negative fp literals with preceding "-" are
// handled likewise for uniformity.
2218//
2219bool
2220AMDGPUAsmParser::parseSP3NegModifier() {
2221
2222 AsmToken NextToken[2];
2223 peekTokens(NextToken);
2224
2225 if (isToken(AsmToken::Minus) &&
2226 (isRegister(NextToken[0], NextToken[1]) ||
2227 NextToken[0].is(AsmToken::Pipe) ||
2228 isId(NextToken[0], "abs"))) {
2229 lex();
2230 return true;
2231 }
2232
2233 return false;
2234}
2235
Sam Kolton9772eb32017-01-11 11:46:30 +00002236OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00002237AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
2238 bool AllowImm) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002239 bool Neg, SP3Neg;
2240 bool Abs, SP3Abs;
2241 SMLoc Loc;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002242
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002243 // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
2244 if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) {
2245 Error(getLoc(), "invalid syntax, expected 'neg' modifier");
2246 return MatchOperand_ParseFail;
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00002247 }
2248
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002249 SP3Neg = parseSP3NegModifier();
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002250
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002251 Loc = getLoc();
2252 Neg = trySkipId("neg");
2253 if (Neg && SP3Neg) {
2254 Error(Loc, "expected register or immediate");
2255 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002256 }
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002257 if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg"))
2258 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002259
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002260 Abs = trySkipId("abs");
2261 if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs"))
2262 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002263
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002264 Loc = getLoc();
2265 SP3Abs = trySkipToken(AsmToken::Pipe);
2266 if (Abs && SP3Abs) {
2267 Error(Loc, "expected register or immediate");
2268 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002269 }
2270
Sam Kolton9772eb32017-01-11 11:46:30 +00002271 OperandMatchResultTy Res;
2272 if (AllowImm) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002273 Res = parseRegOrImm(Operands, SP3Abs);
Sam Kolton9772eb32017-01-11 11:46:30 +00002274 } else {
2275 Res = parseReg(Operands);
2276 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00002277 if (Res != MatchOperand_Success) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002278 return (SP3Neg || Neg || SP3Abs || Abs)? MatchOperand_ParseFail : Res;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002279 }
2280
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002281 if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar"))
2282 return MatchOperand_ParseFail;
2283 if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses"))
2284 return MatchOperand_ParseFail;
2285 if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses"))
2286 return MatchOperand_ParseFail;
2287
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002288 AMDGPUOperand::Modifiers Mods;
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002289 Mods.Abs = Abs || SP3Abs;
2290 Mods.Neg = Neg || SP3Neg;
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00002291
Sam Kolton945231a2016-06-10 09:57:59 +00002292 if (Mods.hasFPModifiers()) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00002293 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002294 if (Op.isExpr()) {
2295 Error(Op.getStartLoc(), "expected an absolute expression");
2296 return MatchOperand_ParseFail;
2297 }
Sam Kolton945231a2016-06-10 09:57:59 +00002298 Op.setModifiers(Mods);
Sam Kolton1bdcef72016-05-23 09:59:02 +00002299 }
2300 return MatchOperand_Success;
2301}
2302
Alex Bradbury58eba092016-11-01 16:32:05 +00002303OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00002304AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
2305 bool AllowImm) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002306 bool Sext = trySkipId("sext");
2307 if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext"))
2308 return MatchOperand_ParseFail;
Sam Kolton945231a2016-06-10 09:57:59 +00002309
Sam Kolton9772eb32017-01-11 11:46:30 +00002310 OperandMatchResultTy Res;
2311 if (AllowImm) {
2312 Res = parseRegOrImm(Operands);
2313 } else {
2314 Res = parseReg(Operands);
2315 }
Sam Kolton945231a2016-06-10 09:57:59 +00002316 if (Res != MatchOperand_Success) {
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002317 return Sext? MatchOperand_ParseFail : Res;
Sam Kolton945231a2016-06-10 09:57:59 +00002318 }
2319
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002320 if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses"))
2321 return MatchOperand_ParseFail;
2322
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002323 AMDGPUOperand::Modifiers Mods;
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002324 Mods.Sext = Sext;
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00002325
Sam Kolton945231a2016-06-10 09:57:59 +00002326 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00002327 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002328 if (Op.isExpr()) {
2329 Error(Op.getStartLoc(), "expected an absolute expression");
2330 return MatchOperand_ParseFail;
2331 }
Sam Kolton945231a2016-06-10 09:57:59 +00002332 Op.setModifiers(Mods);
2333 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002334
Sam Kolton945231a2016-06-10 09:57:59 +00002335 return MatchOperand_Success;
2336}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002337
Sam Kolton9772eb32017-01-11 11:46:30 +00002338OperandMatchResultTy
2339AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2340 return parseRegOrImmWithFPInputMods(Operands, false);
2341}
2342
// Like parseRegOrImmWithIntInputMods, but accepts registers only.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, /*AllowImm=*/false);
}
2347
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002348OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002349 auto Loc = getLoc();
2350 if (trySkipId("off")) {
2351 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc,
2352 AMDGPUOperand::ImmTyOff, false));
2353 return MatchOperand_Success;
2354 }
2355
2356 if (!isRegister())
2357 return MatchOperand_NoMatch;
2358
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002359 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2360 if (Reg) {
2361 Operands.push_back(std::move(Reg));
2362 return MatchOperand_Success;
2363 }
2364
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002365 return MatchOperand_ParseFail;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002366
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002367}
2368
Tom Stellard45bb48e2015-06-13 03:28:10 +00002369unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002370 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2371
2372 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00002373 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2374 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2375 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00002376 return Match_InvalidOperand;
2377
Tom Stellard88e0b252015-10-06 15:57:53 +00002378 if ((TSFlags & SIInstrFlags::VOP3) &&
2379 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2380 getForcedEncodingSize() != 64)
2381 return Match_PreferE32;
2382
Sam Koltona568e3d2016-12-22 12:57:41 +00002383 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2384 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002385 // v_mac_f32/16 allow only dst_sel == DWORD;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002386 auto OpNum =
2387 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
Sam Koltona3ec5c12016-10-07 14:46:06 +00002388 const auto &Op = Inst.getOperand(OpNum);
2389 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2390 return Match_InvalidOperand;
2391 }
2392 }
2393
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00002394 if (TSFlags & SIInstrFlags::FLAT) {
Matt Arsenaultfd023142017-06-12 15:55:58 +00002395 // FIXME: Produces error without correct column reported.
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00002396 auto Opcode = Inst.getOpcode();
2397 auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset);
2398
Matt Arsenaultfd023142017-06-12 15:55:58 +00002399 const auto &Op = Inst.getOperand(OpNum);
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00002400 if (!hasFlatOffsets() && Op.getImm() != 0)
Matt Arsenaultfd023142017-06-12 15:55:58 +00002401 return Match_InvalidOperand;
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00002402
2403 // GFX10: Address offset is 12-bit signed byte offset. Must be positive for
2404 // FLAT segment. For FLAT segment MSB is ignored and forced to zero.
2405 if (isGFX10()) {
2406 if (TSFlags & SIInstrFlags::IsNonFlatSeg) {
2407 if (!isInt<12>(Op.getImm()))
2408 return Match_InvalidOperand;
2409 } else {
2410 if (!isUInt<11>(Op.getImm()))
2411 return Match_InvalidOperand;
2412 }
2413 }
Matt Arsenaultfd023142017-06-12 15:55:58 +00002414 }
2415
Tom Stellard45bb48e2015-06-13 03:28:10 +00002416 return Match_Success;
2417}
2418
Matt Arsenault5f45e782017-01-09 18:44:11 +00002419// What asm variants we should check
2420ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2421 if (getForcedEncodingSize() == 32) {
2422 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2423 return makeArrayRef(Variants);
2424 }
2425
2426 if (isForcedVOP3()) {
2427 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2428 return makeArrayRef(Variants);
2429 }
2430
2431 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002432 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2433 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002434 return makeArrayRef(Variants);
2435 }
2436
2437 if (isForcedDPP()) {
2438 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2439 return makeArrayRef(Variants);
2440 }
2441
2442 static const unsigned Variants[] = {
2443 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002444 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002445 };
2446
2447 return makeArrayRef(Variants);
2448}
2449
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002450unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2451 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2452 const unsigned Num = Desc.getNumImplicitUses();
2453 for (unsigned i = 0; i < Num; ++i) {
2454 unsigned Reg = Desc.ImplicitUses[i];
2455 switch (Reg) {
2456 case AMDGPU::FLAT_SCR:
2457 case AMDGPU::VCC:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00002458 case AMDGPU::VCC_LO:
2459 case AMDGPU::VCC_HI:
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002460 case AMDGPU::M0:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00002461 case AMDGPU::SGPR_NULL:
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002462 return Reg;
2463 default:
2464 break;
2465 }
2466 }
2467 return AMDGPU::NoRegister;
2468}
2469
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002470// NB: This code is correct only when used to check constant
2471// bus limitations because GFX7 support no f16 inline constants.
2472// Note that there are no cases when a GFX7 opcode violates
2473// constant bus limitations due to the use of an f16 constant.
2474bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2475 unsigned OpIdx) const {
2476 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2477
2478 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2479 return false;
2480 }
2481
2482 const MCOperand &MO = Inst.getOperand(OpIdx);
2483
2484 int64_t Val = MO.getImm();
2485 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2486
2487 switch (OpSize) { // expected operand size
2488 case 8:
2489 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2490 case 4:
2491 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2492 case 2: {
2493 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2494 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00002495 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16 ||
2496 OperandType == AMDGPU::OPERAND_REG_IMM_V2INT16 ||
2497 OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002498 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2499 } else {
2500 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2501 }
2502 }
2503 default:
2504 llvm_unreachable("invalid operand size");
2505 }
2506}
2507
2508bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2509 const MCOperand &MO = Inst.getOperand(OpIdx);
2510 if (MO.isImm()) {
2511 return !isInlineConstant(Inst, OpIdx);
2512 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002513 return !MO.isReg() ||
2514 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002515}
2516
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002517bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002518 const unsigned Opcode = Inst.getOpcode();
2519 const MCInstrDesc &Desc = MII.get(Opcode);
2520 unsigned ConstantBusUseCount = 0;
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002521 unsigned NumLiterals = 0;
2522 unsigned LiteralSize;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002523
2524 if (Desc.TSFlags &
2525 (SIInstrFlags::VOPC |
2526 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
Sam Koltonf7659d712017-05-23 10:08:55 +00002527 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2528 SIInstrFlags::SDWA)) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002529 // Check special imm operands (used by madmk, etc)
2530 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2531 ++ConstantBusUseCount;
2532 }
2533
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002534 SmallDenseSet<unsigned> SGPRsUsed;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002535 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2536 if (SGPRUsed != AMDGPU::NoRegister) {
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002537 SGPRsUsed.insert(SGPRUsed);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002538 ++ConstantBusUseCount;
2539 }
2540
2541 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2542 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2543 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2544
2545 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2546
2547 for (int OpIdx : OpIndices) {
2548 if (OpIdx == -1) break;
2549
2550 const MCOperand &MO = Inst.getOperand(OpIdx);
2551 if (usesConstantBus(Inst, OpIdx)) {
2552 if (MO.isReg()) {
2553 const unsigned Reg = mc2PseudoReg(MO.getReg());
2554 // Pairs of registers with a partial intersections like these
2555 // s0, s[0:1]
2556 // flat_scratch_lo, flat_scratch
2557 // flat_scratch_lo, flat_scratch_hi
2558 // are theoretically valid but they are disabled anyway.
2559 // Note that this code mimics SIInstrInfo::verifyInstruction
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002560 if (!SGPRsUsed.count(Reg)) {
2561 SGPRsUsed.insert(Reg);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002562 ++ConstantBusUseCount;
2563 }
2564 SGPRUsed = Reg;
2565 } else { // Expression or a literal
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002566
2567 if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
2568 continue; // special operand like VINTERP attr_chan
2569
2570 // An instruction may use only one literal.
2571 // This has been validated on the previous step.
2572 // See validateVOP3Literal.
2573 // This literal may be used as more than one operand.
2574 // If all these operands are of the same size,
2575 // this literal counts as one scalar value.
2576 // Otherwise it counts as 2 scalar values.
2577 // See "GFX10 Shader Programming", section 3.6.2.3.
2578
2579 unsigned Size = AMDGPU::getOperandSize(Desc, OpIdx);
2580 if (Size < 4) Size = 4;
2581
2582 if (NumLiterals == 0) {
2583 NumLiterals = 1;
2584 LiteralSize = Size;
2585 } else if (LiteralSize != Size) {
2586 NumLiterals = 2;
2587 }
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002588 }
2589 }
2590 }
2591 }
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002592 ConstantBusUseCount += NumLiterals;
2593
2594 if (isGFX10())
2595 return ConstantBusUseCount <= 2;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002596
2597 return ConstantBusUseCount <= 1;
2598}
2599
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002600bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002601 const unsigned Opcode = Inst.getOpcode();
2602 const MCInstrDesc &Desc = MII.get(Opcode);
2603
2604 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2605 if (DstIdx == -1 ||
2606 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2607 return true;
2608 }
2609
2610 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2611
2612 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2613 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2614 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2615
2616 assert(DstIdx != -1);
2617 const MCOperand &Dst = Inst.getOperand(DstIdx);
2618 assert(Dst.isReg());
2619 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2620
2621 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2622
2623 for (int SrcIdx : SrcIndices) {
2624 if (SrcIdx == -1) break;
2625 const MCOperand &Src = Inst.getOperand(SrcIdx);
2626 if (Src.isReg()) {
2627 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2628 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2629 return false;
2630 }
2631 }
2632 }
2633
2634 return true;
2635}
2636
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002637bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2638
2639 const unsigned Opc = Inst.getOpcode();
2640 const MCInstrDesc &Desc = MII.get(Opc);
2641
2642 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2643 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2644 assert(ClampIdx != -1);
2645 return Inst.getOperand(ClampIdx).getImm() == 0;
2646 }
2647
2648 return true;
2649}
2650
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002651bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
2652
2653 const unsigned Opc = Inst.getOpcode();
2654 const MCInstrDesc &Desc = MII.get(Opc);
2655
2656 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2657 return true;
2658
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002659 int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
2660 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2661 int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
2662
2663 assert(VDataIdx != -1);
2664 assert(DMaskIdx != -1);
2665 assert(TFEIdx != -1);
2666
2667 unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
2668 unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
2669 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2670 if (DMask == 0)
2671 DMask = 1;
2672
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002673 unsigned DataSize =
2674 (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
2675 if (hasPackedD16()) {
2676 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2677 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
2678 DataSize = (DataSize + 1) / 2;
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002679 }
2680
2681 return (VDataSize / 4) == DataSize + TFESize;
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002682}
2683
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002684bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst) {
2685 const unsigned Opc = Inst.getOpcode();
2686 const MCInstrDesc &Desc = MII.get(Opc);
2687
2688 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0 || !isGFX10())
2689 return true;
2690
2691 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
2692 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
2693 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
2694 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
2695 int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
2696 int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
2697
2698 assert(VAddr0Idx != -1);
2699 assert(SrsrcIdx != -1);
2700 assert(DimIdx != -1);
2701 assert(SrsrcIdx > VAddr0Idx);
2702
2703 unsigned Dim = Inst.getOperand(DimIdx).getImm();
2704 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
2705 bool IsNSA = SrsrcIdx - VAddr0Idx > 1;
2706 unsigned VAddrSize =
2707 IsNSA ? SrsrcIdx - VAddr0Idx
2708 : AMDGPU::getRegOperandSize(getMRI(), Desc, VAddr0Idx) / 4;
2709
2710 unsigned AddrSize = BaseOpcode->NumExtraArgs +
2711 (BaseOpcode->Gradients ? DimInfo->NumGradients : 0) +
2712 (BaseOpcode->Coordinates ? DimInfo->NumCoords : 0) +
2713 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
2714 if (!IsNSA) {
2715 if (AddrSize > 8)
2716 AddrSize = 16;
2717 else if (AddrSize > 4)
2718 AddrSize = 8;
2719 }
2720
2721 return VAddrSize == AddrSize;
2722}
2723
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002724bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2725
2726 const unsigned Opc = Inst.getOpcode();
2727 const MCInstrDesc &Desc = MII.get(Opc);
2728
2729 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2730 return true;
2731 if (!Desc.mayLoad() || !Desc.mayStore())
2732 return true; // Not atomic
2733
2734 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2735 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2736
2737 // This is an incomplete check because image_atomic_cmpswap
2738 // may only use 0x3 and 0xf while other atomic operations
2739 // may use 0x1 and 0x3. However these limitations are
2740 // verified when we check that dmask matches dst size.
2741 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2742}
2743
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002744bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2745
2746 const unsigned Opc = Inst.getOpcode();
2747 const MCInstrDesc &Desc = MII.get(Opc);
2748
2749 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2750 return true;
2751
2752 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2753 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2754
2755 // GATHER4 instructions use dmask in a different fashion compared to
2756 // other MIMG instructions. The only useful DMASK values are
2757 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2758 // (red,red,red,red) etc.) The ISA document doesn't mention
2759 // this.
2760 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2761}
2762
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002763bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2764
2765 const unsigned Opc = Inst.getOpcode();
2766 const MCInstrDesc &Desc = MII.get(Opc);
2767
2768 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2769 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002770
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002771 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2772 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2773 if (isCI() || isSI())
2774 return false;
2775 }
2776
2777 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002778}
2779
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002780bool AMDGPUAsmParser::validateMIMGDim(const MCInst &Inst) {
2781 const unsigned Opc = Inst.getOpcode();
2782 const MCInstrDesc &Desc = MII.get(Opc);
2783
2784 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2785 return true;
2786
2787 int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
2788 if (DimIdx < 0)
2789 return true;
2790
2791 long Imm = Inst.getOperand(DimIdx).getImm();
2792 if (Imm < 0 || Imm >= 8)
2793 return false;
2794
2795 return true;
2796}
2797
// Returns true for "reverse" opcode forms (v_subrev*, v_subbrev*,
// v_lshlrev*, v_lshrrev*, v_ashrrev* and their packed/carry-out variants)
// across all encodings (e32/e64/SDWA-less pseudo, gfx6_gfx7, vi, gfx9,
// gfx10). Used by validateLdsDirect to forbid lds_direct as src0 of these
// instructions.
static bool IsRevOpcode(const unsigned Opcode)
{
  switch (Opcode) {
  // v_subrev_f32
  case AMDGPU::V_SUBREV_F32_e32:
  case AMDGPU::V_SUBREV_F32_e64:
  case AMDGPU::V_SUBREV_F32_e32_gfx10:
  case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e32_vi:
  case AMDGPU::V_SUBREV_F32_e64_gfx10:
  case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e64_vi:

  // v_subrev_i32
  case AMDGPU::V_SUBREV_I32_e32:
  case AMDGPU::V_SUBREV_I32_e64:
  case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7:

  // v_subbrev_u32 (subtract with borrow, reversed)
  case AMDGPU::V_SUBBREV_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e32_vi:
  case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e64_vi:

  // v_subrev_u32
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_U32_e32_vi:
  case AMDGPU::V_SUBREV_U32_e64_gfx9:
  case AMDGPU::V_SUBREV_U32_e64_vi:

  // v_subrev_f16
  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32_gfx10:
  case AMDGPU::V_SUBREV_F16_e32_vi:
  case AMDGPU::V_SUBREV_F16_e64_gfx10:
  case AMDGPU::V_SUBREV_F16_e64_vi:

  // v_subrev_u16
  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32_vi:
  case AMDGPU::V_SUBREV_U16_e64_vi:

  // v_subrev_co_u32 (carry-out variants)
  case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx10:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:

  // v_subbrev_co_u32
  case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:

  // v_subrev_nc_u32 (gfx10 no-carry)
  case AMDGPU::V_SUBREV_NC_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_NC_U32_e64_gfx10:

  // v_subrev_co_ci_u32 (gfx10 carry-in/carry-out)
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10:

  // v_lshrrev_b32
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e32_vi:
  case AMDGPU::V_LSHRREV_B32_e64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_gfx10:
  case AMDGPU::V_LSHRREV_B32_e64_gfx10:

  // v_ashrrev_i32
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32_gfx10:
  case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e32_vi:
  case AMDGPU::V_ASHRREV_I32_e64_gfx10:
  case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e64_vi:

  // v_lshlrev_b32
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32_gfx10:
  case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e32_vi:
  case AMDGPU::V_LSHLREV_B32_e64_gfx10:
  case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e64_vi:

  // v_lshlrev_b16
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHLREV_B16_e64:
  case AMDGPU::V_LSHLREV_B16_e32_vi:
  case AMDGPU::V_LSHLREV_B16_e64_vi:
  case AMDGPU::V_LSHLREV_B16_gfx10:

  // v_lshrrev_b16
  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_LSHRREV_B16_e32_vi:
  case AMDGPU::V_LSHRREV_B16_e64_vi:
  case AMDGPU::V_LSHRREV_B16_gfx10:

  // v_ashrrev_i16
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_ASHRREV_I16_e32_vi:
  case AMDGPU::V_ASHRREV_I16_e64_vi:
  case AMDGPU::V_ASHRREV_I16_gfx10:

  // 64-bit shift variants
  case AMDGPU::V_LSHLREV_B64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHLREV_B64_vi:

  case AMDGPU::V_LSHRREV_B64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_vi:

  case AMDGPU::V_ASHRREV_I64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_ASHRREV_I64_vi:

  // packed (VOP3P) shift variants
  case AMDGPU::V_PK_LSHLREV_B16:
  case AMDGPU::V_PK_LSHLREV_B16_gfx10:
  case AMDGPU::V_PK_LSHLREV_B16_vi:

  case AMDGPU::V_PK_LSHRREV_B16:
  case AMDGPU::V_PK_LSHRREV_B16_gfx10:
  case AMDGPU::V_PK_LSHRREV_B16_vi:
  case AMDGPU::V_PK_ASHRREV_I16:
  case AMDGPU::V_PK_ASHRREV_I16_gfx10:
  case AMDGPU::V_PK_ASHRREV_I16_vi:
    return true;
  default:
    return false;
  }
}
2926
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002927bool AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
2928
2929 using namespace SIInstrFlags;
2930 const unsigned Opcode = Inst.getOpcode();
2931 const MCInstrDesc &Desc = MII.get(Opcode);
2932
2933 // lds_direct register is defined so that it can be used
2934 // with 9-bit operands only. Ignore encodings which do not accept these.
2935 if ((Desc.TSFlags & (VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA)) == 0)
2936 return true;
2937
2938 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2939 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2940 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2941
2942 const int SrcIndices[] = { Src1Idx, Src2Idx };
2943
2944 // lds_direct cannot be specified as either src1 or src2.
2945 for (int SrcIdx : SrcIndices) {
2946 if (SrcIdx == -1) break;
2947 const MCOperand &Src = Inst.getOperand(SrcIdx);
2948 if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
2949 return false;
2950 }
2951 }
2952
2953 if (Src0Idx == -1)
2954 return true;
2955
2956 const MCOperand &Src = Inst.getOperand(Src0Idx);
2957 if (!Src.isReg() || Src.getReg() != LDS_DIRECT)
2958 return true;
2959
2960 // lds_direct is specified as src0. Check additional limitations.
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002961 return (Desc.TSFlags & SIInstrFlags::SDWA) == 0 && !IsRevOpcode(Opcode);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002962}
2963
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00002964bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
2965 unsigned Opcode = Inst.getOpcode();
2966 const MCInstrDesc &Desc = MII.get(Opcode);
2967 if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
2968 return true;
2969
2970 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2971 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2972
2973 const int OpIndices[] = { Src0Idx, Src1Idx };
2974
2975 unsigned NumLiterals = 0;
2976 uint32_t LiteralValue;
2977
2978 for (int OpIdx : OpIndices) {
2979 if (OpIdx == -1) break;
2980
2981 const MCOperand &MO = Inst.getOperand(OpIdx);
2982 if (MO.isImm() &&
2983 // Exclude special imm operands (like that used by s_set_gpr_idx_on)
2984 AMDGPU::isSISrcOperand(Desc, OpIdx) &&
2985 !isInlineConstant(Inst, OpIdx)) {
2986 uint32_t Value = static_cast<uint32_t>(MO.getImm());
2987 if (NumLiterals == 0 || LiteralValue != Value) {
2988 LiteralValue = Value;
2989 ++NumLiterals;
2990 }
2991 }
2992 }
2993
2994 return NumLiterals <= 1;
2995}
2996
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00002997// VOP3 literal is only allowed in GFX10+ and only one can be used
2998bool AMDGPUAsmParser::validateVOP3Literal(const MCInst &Inst) const {
2999 unsigned Opcode = Inst.getOpcode();
3000 const MCInstrDesc &Desc = MII.get(Opcode);
3001 if (!(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)))
3002 return true;
3003
3004 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3005 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3006 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3007
3008 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
3009
3010 unsigned NumLiterals = 0;
3011 uint32_t LiteralValue;
3012
3013 for (int OpIdx : OpIndices) {
3014 if (OpIdx == -1) break;
3015
3016 const MCOperand &MO = Inst.getOperand(OpIdx);
3017 if (!MO.isImm() || !AMDGPU::isSISrcOperand(Desc, OpIdx))
3018 continue;
3019
3020 if (!isInlineConstant(Inst, OpIdx)) {
3021 uint32_t Value = static_cast<uint32_t>(MO.getImm());
3022 if (NumLiterals == 0 || LiteralValue != Value) {
3023 LiteralValue = Value;
3024 ++NumLiterals;
3025 }
3026 }
3027 }
3028
3029 return !NumLiterals ||
3030 (NumLiterals == 1 && getFeatureBits()[AMDGPU::FeatureVOP3Literal]);
3031}
3032
// Top-level semantic validation of a successfully matched instruction.
// Runs each per-instruction check in turn; on the first failure it emits a
// diagnostic at IDLoc and returns false. Returns true if all checks pass.
// The order of checks determines which diagnostic is reported when an
// instruction violates more than one rule.
bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
                                          const SMLoc &IDLoc) {
  // lds_direct may only be used as src0 of selected VOP encodings.
  if (!validateLdsDirect(Inst)) {
    Error(IDLoc,
      "invalid use of lds_direct");
    return false;
  }
  // SOP2/SOPC provide a single literal slot.
  if (!validateSOPLiteral(Inst)) {
    Error(IDLoc,
      "only one literal operand is allowed");
    return false;
  }
  // VOP3 literals require FeatureVOP3Literal (GFX10+).
  if (!validateVOP3Literal(Inst)) {
    Error(IDLoc,
      "invalid literal operand");
    return false;
  }
  if (!validateConstantBusLimitations(Inst)) {
    Error(IDLoc,
      "invalid operand (violates constant bus restrictions)");
    return false;
  }
  // Earlyclobber destinations may not overlap sources.
  if (!validateEarlyClobberLimitations(Inst)) {
    Error(IDLoc,
      "destination must be different than all sources");
    return false;
  }
  if (!validateIntClampSupported(Inst)) {
    Error(IDLoc,
      "integer clamping is not supported on this GPU");
    return false;
  }
  // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
  if (!validateMIMGD16(Inst)) {
    Error(IDLoc,
      "d16 modifier is not supported on this GPU");
    return false;
  }
  if (!validateMIMGDim(Inst)) {
    Error(IDLoc, "dim modifier is required on this GPU");
    return false;
  }
  if (!validateMIMGDataSize(Inst)) {
    Error(IDLoc,
      "image data size does not match dmask and tfe");
    return false;
  }
  if (!validateMIMGAddrSize(Inst)) {
    Error(IDLoc,
      "image address size does not match dim and a16");
    return false;
  }
  if (!validateMIMGAtomicDMask(Inst)) {
    Error(IDLoc,
      "invalid atomic image dmask");
    return false;
  }
  if (!validateMIMGGatherDMask(Inst)) {
    Error(IDLoc,
      "invalid image_gather dmask: only one bit must be set");
    return false;
  }

  return true;
}
3098
Stanislav Mekhanoshine98944e2019-03-11 17:04:35 +00003099static std::string AMDGPUMnemonicSpellCheck(StringRef S,
3100 const FeatureBitset &FBS,
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00003101 unsigned VariantID = 0);
3102
// Matches the parsed operands against all applicable instruction variants
// (see getMatchedVariants) and, on success, validates and emits the
// instruction. Returns true (error) on any failure, following the
// MCTargetAsmParser convention.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
         && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail && Result != Match_InvalidOperand
         && Result != Match_MissingFeature
         && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // Stop at the first variant that matches.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // Matched syntactically; still subject to semantic validation
    // (validateInstruction reports its own diagnostic).
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the mnemonics
    // available under the current feature set.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // Point the diagnostic at the offending operand when the matcher
    // identified one.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
3172
Artem Tamazov25478d82016-12-29 15:41:52 +00003173bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
3174 int64_t Tmp = -1;
3175 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
3176 return true;
3177 }
3178 if (getParser().parseAbsoluteExpression(Tmp)) {
3179 return true;
3180 }
3181 Ret = static_cast<uint32_t>(Tmp);
3182 return false;
3183}
3184
Tom Stellard347ac792015-06-26 21:15:07 +00003185bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
3186 uint32_t &Minor) {
Artem Tamazov25478d82016-12-29 15:41:52 +00003187 if (ParseAsAbsoluteExpression(Major))
Tom Stellard347ac792015-06-26 21:15:07 +00003188 return TokError("invalid major version");
3189
Tom Stellard347ac792015-06-26 21:15:07 +00003190 if (getLexer().isNot(AsmToken::Comma))
3191 return TokError("minor version number required, comma expected");
3192 Lex();
3193
Artem Tamazov25478d82016-12-29 15:41:52 +00003194 if (ParseAsAbsoluteExpression(Minor))
Tom Stellard347ac792015-06-26 21:15:07 +00003195 return TokError("invalid minor version");
3196
Tom Stellard347ac792015-06-26 21:15:07 +00003197 return false;
3198}
3199
Scott Linder1e8c2c72018-06-21 19:38:56 +00003200bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
3201 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
3202 return TokError("directive only supported for amdgcn architecture");
3203
3204 std::string Target;
3205
3206 SMLoc TargetStart = getTok().getLoc();
3207 if (getParser().parseEscapedString(Target))
3208 return true;
3209 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
3210
3211 std::string ExpectedTarget;
3212 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
3213 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
3214
3215 if (Target != ExpectedTargetOS.str())
3216 return getParser().Error(TargetRange.Start, "target must match options",
3217 TargetRange);
3218
3219 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
3220 return false;
3221}
3222
3223bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
3224 return getParser().Error(Range.Start, "value out of range", Range);
3225}
3226
// Computes the VGPR/SGPR "block" (granule) counts used in the kernel
// descriptor from the highest-used register numbers plus implicitly-used
// registers (VCC, flat_scratch, XNACK). Returns true (error) when the SGPR
// count exceeds the target's addressable range; outputs go to VGPRBlocks
// and SGPRBlocks.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
    unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
    unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;

  if (Version.Major >= 10)
    // GFX10+: the SGPR count contribution is forced to zero here.
    NumSGPRs = 0;
  else {
    unsigned MaxAddressableNumSGPRs =
        IsaInfo::getAddressableNumSGPRs(&getSTI());

    // GFX8+ without the SGPR-init bug: range-check before the extra
    // (VCC/flat_scratch/XNACK) SGPRs are added in.
    if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    NumSGPRs +=
        IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

    // GFX7 and older, or targets with the SGPR-init bug: range-check after
    // the extra SGPRs have been added.
    if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    // The SGPR-init bug workaround pins the count to a fixed value.
    if (Features.test(FeatureSGPRInitBug))
      NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  VGPRBlocks = IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
3265
3266bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
3267 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
3268 return TokError("directive only supported for amdgcn architecture");
3269
3270 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
3271 return TokError("directive only supported for amdhsa OS");
3272
3273 StringRef KernelName;
3274 if (getParser().parseIdentifier(KernelName))
3275 return true;
3276
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +00003277 kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI());
Scott Linder1e8c2c72018-06-21 19:38:56 +00003278
3279 StringSet<> Seen;
3280
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003281 IsaVersion IVersion = getIsaVersion(getSTI().getCPU());
Scott Linder1e8c2c72018-06-21 19:38:56 +00003282
3283 SMRange VGPRRange;
3284 uint64_t NextFreeVGPR = 0;
3285 SMRange SGPRRange;
3286 uint64_t NextFreeSGPR = 0;
3287 unsigned UserSGPRCount = 0;
3288 bool ReserveVCC = true;
3289 bool ReserveFlatScr = true;
3290 bool ReserveXNACK = hasXNACK();
3291
3292 while (true) {
3293 while (getLexer().is(AsmToken::EndOfStatement))
3294 Lex();
3295
3296 if (getLexer().isNot(AsmToken::Identifier))
3297 return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");
3298
3299 StringRef ID = getTok().getIdentifier();
3300 SMRange IDRange = getTok().getLocRange();
3301 Lex();
3302
3303 if (ID == ".end_amdhsa_kernel")
3304 break;
3305
3306 if (Seen.find(ID) != Seen.end())
3307 return TokError(".amdhsa_ directives cannot be repeated");
3308 Seen.insert(ID);
3309
3310 SMLoc ValStart = getTok().getLoc();
3311 int64_t IVal;
3312 if (getParser().parseAbsoluteExpression(IVal))
3313 return true;
3314 SMLoc ValEnd = getTok().getLoc();
3315 SMRange ValRange = SMRange(ValStart, ValEnd);
3316
3317 if (IVal < 0)
3318 return OutOfRangeError(ValRange);
3319
3320 uint64_t Val = IVal;
3321
3322#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
3323 if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
3324 return OutOfRangeError(RANGE); \
3325 AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);
3326
3327 if (ID == ".amdhsa_group_segment_fixed_size") {
3328 if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
3329 return OutOfRangeError(ValRange);
3330 KD.group_segment_fixed_size = Val;
3331 } else if (ID == ".amdhsa_private_segment_fixed_size") {
3332 if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
3333 return OutOfRangeError(ValRange);
3334 KD.private_segment_fixed_size = Val;
3335 } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
3336 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3337 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
3338 Val, ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003339 UserSGPRCount += 4;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003340 } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
3341 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3342 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
3343 ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003344 UserSGPRCount += 2;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003345 } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
3346 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3347 KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
3348 ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003349 UserSGPRCount += 2;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003350 } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
3351 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3352 KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
3353 Val, ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003354 UserSGPRCount += 2;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003355 } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
3356 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3357 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
3358 ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003359 UserSGPRCount += 2;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003360 } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
3361 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3362 KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
3363 ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003364 UserSGPRCount += 2;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003365 } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
3366 PARSE_BITS_ENTRY(KD.kernel_code_properties,
3367 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
3368 Val, ValRange);
Konstantin Zhuravlyov88268e32019-03-20 19:44:47 +00003369 UserSGPRCount += 1;
Scott Linder1e8c2c72018-06-21 19:38:56 +00003370 } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
3371 PARSE_BITS_ENTRY(
3372 KD.compute_pgm_rsrc2,
3373 COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
3374 ValRange);
3375 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
3376 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3377 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
3378 ValRange);
3379 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
3380 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3381 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
3382 ValRange);
3383 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
3384 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3385 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
3386 ValRange);
3387 } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
3388 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3389 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
3390 ValRange);
3391 } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
3392 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3393 COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
3394 ValRange);
3395 } else if (ID == ".amdhsa_next_free_vgpr") {
3396 VGPRRange = ValRange;
3397 NextFreeVGPR = Val;
3398 } else if (ID == ".amdhsa_next_free_sgpr") {
3399 SGPRRange = ValRange;
3400 NextFreeSGPR = Val;
3401 } else if (ID == ".amdhsa_reserve_vcc") {
3402 if (!isUInt<1>(Val))
3403 return OutOfRangeError(ValRange);
3404 ReserveVCC = Val;
3405 } else if (ID == ".amdhsa_reserve_flat_scratch") {
3406 if (IVersion.Major < 7)
3407 return getParser().Error(IDRange.Start, "directive requires gfx7+",
3408 IDRange);
3409 if (!isUInt<1>(Val))
3410 return OutOfRangeError(ValRange);
3411 ReserveFlatScr = Val;
3412 } else if (ID == ".amdhsa_reserve_xnack_mask") {
3413 if (IVersion.Major < 8)
3414 return getParser().Error(IDRange.Start, "directive requires gfx8+",
3415 IDRange);
3416 if (!isUInt<1>(Val))
3417 return OutOfRangeError(ValRange);
3418 ReserveXNACK = Val;
3419 } else if (ID == ".amdhsa_float_round_mode_32") {
3420 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
3421 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
3422 } else if (ID == ".amdhsa_float_round_mode_16_64") {
3423 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
3424 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
3425 } else if (ID == ".amdhsa_float_denorm_mode_32") {
3426 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
3427 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
3428 } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
3429 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
3430 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
3431 ValRange);
3432 } else if (ID == ".amdhsa_dx10_clamp") {
3433 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
3434 COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
3435 } else if (ID == ".amdhsa_ieee_mode") {
3436 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
3437 Val, ValRange);
3438 } else if (ID == ".amdhsa_fp16_overflow") {
3439 if (IVersion.Major < 9)
3440 return getParser().Error(IDRange.Start, "directive requires gfx9+",
3441 IDRange);
3442 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
3443 ValRange);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003444 } else if (ID == ".amdhsa_workgroup_processor_mode") {
3445 if (IVersion.Major < 10)
3446 return getParser().Error(IDRange.Start, "directive requires gfx10+",
3447 IDRange);
3448 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_WGP_MODE, Val,
3449 ValRange);
3450 } else if (ID == ".amdhsa_memory_ordered") {
3451 if (IVersion.Major < 10)
3452 return getParser().Error(IDRange.Start, "directive requires gfx10+",
3453 IDRange);
3454 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_MEM_ORDERED, Val,
3455 ValRange);
3456 } else if (ID == ".amdhsa_forward_progress") {
3457 if (IVersion.Major < 10)
3458 return getParser().Error(IDRange.Start, "directive requires gfx10+",
3459 IDRange);
3460 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FWD_PROGRESS, Val,
3461 ValRange);
Scott Linder1e8c2c72018-06-21 19:38:56 +00003462 } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
3463 PARSE_BITS_ENTRY(
3464 KD.compute_pgm_rsrc2,
3465 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
3466 ValRange);
3467 } else if (ID == ".amdhsa_exception_fp_denorm_src") {
3468 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3469 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
3470 Val, ValRange);
3471 } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
3472 PARSE_BITS_ENTRY(
3473 KD.compute_pgm_rsrc2,
3474 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
3475 ValRange);
3476 } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
3477 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3478 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
3479 Val, ValRange);
3480 } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
3481 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3482 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
3483 Val, ValRange);
3484 } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
3485 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3486 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
3487 Val, ValRange);
3488 } else if (ID == ".amdhsa_exception_int_div_zero") {
3489 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
3490 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
3491 Val, ValRange);
3492 } else {
3493 return getParser().Error(IDRange.Start,
3494 "unknown .amdhsa_kernel directive", IDRange);
3495 }
3496
3497#undef PARSE_BITS_ENTRY
3498 }
3499
3500 if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
3501 return TokError(".amdhsa_next_free_vgpr directive is required");
3502
3503 if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
3504 return TokError(".amdhsa_next_free_sgpr directive is required");
3505
3506 unsigned VGPRBlocks;
3507 unsigned SGPRBlocks;
3508 if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
3509 ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
3510 SGPRRange, VGPRBlocks, SGPRBlocks))
3511 return true;
3512
3513 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
3514 VGPRBlocks))
3515 return OutOfRangeError(VGPRRange);
3516 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
3517 COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);
3518
3519 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
3520 SGPRBlocks))
3521 return OutOfRangeError(SGPRRange);
3522 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
3523 COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
3524 SGPRBlocks);
3525
3526 if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
3527 return TokError("too many user SGPRs enabled");
3528 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
3529 UserSGPRCount);
3530
3531 getTargetStreamer().EmitAmdhsaKernelDescriptor(
3532 getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
3533 ReserveFlatScr, ReserveXNACK);
3534 return false;
3535}
3536
Tom Stellard347ac792015-06-26 21:15:07 +00003537bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00003538 uint32_t Major;
3539 uint32_t Minor;
3540
3541 if (ParseDirectiveMajorMinor(Major, Minor))
3542 return true;
3543
3544 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
3545 return false;
3546}
3547
/// Parse the .hsa_code_object_isa directive.
///
/// Accepted forms:
///   .hsa_code_object_isa                                      (no operands)
///   .hsa_code_object_isa <major>,<minor>,<stepping>,"vendor","arch"
/// With no operands, the ISA version of the targeted GPU is emitted with the
/// default "AMD"/"AMDGPU" vendor and arch names.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  // <major>,<minor> — same helper used by .hsa_code_object_version.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch names must be quoted string tokens.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
3599
Tom Stellardff7416b2015-06-26 21:58:31 +00003600bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
3601 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00003602 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
3603 // assembly for backwards compatibility.
3604 if (ID == "max_scratch_backing_memory_byte_size") {
3605 Parser.eatToEndOfStatement();
3606 return false;
3607 }
3608
Valery Pykhtindc110542016-03-06 20:25:36 +00003609 SmallString<40> ErrStr;
3610 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00003611 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00003612 return TokError(Err.str());
3613 }
Tom Stellardff7416b2015-06-26 21:58:31 +00003614 Lex();
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003615
3616 if (ID == "enable_wgp_mode") {
3617 if (G_00B848_WGP_MODE(Header.compute_pgm_resource_registers) && !isGFX10())
3618 return TokError("enable_wgp_mode=1 is only allowed on GFX10+");
3619 }
3620
3621 if (ID == "enable_mem_ordered") {
3622 if (G_00B848_MEM_ORDERED(Header.compute_pgm_resource_registers) && !isGFX10())
3623 return TokError("enable_mem_ordered=1 is only allowed on GFX10+");
3624 }
3625
3626 if (ID == "enable_fwd_progress") {
3627 if (G_00B848_FWD_PROGRESS(Header.compute_pgm_resource_registers) && !isGFX10())
3628 return TokError("enable_fwd_progress=1 is only allowed on GFX10+");
3629 }
3630
Tom Stellardff7416b2015-06-26 21:58:31 +00003631 return false;
3632}
3633
3634bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00003635 amd_kernel_code_t Header;
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003636 AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
Tom Stellardff7416b2015-06-26 21:58:31 +00003637
3638 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00003639 // Lex EndOfStatement. This is in a while loop, because lexing a comment
3640 // will set the current token to EndOfStatement.
3641 while(getLexer().is(AsmToken::EndOfStatement))
3642 Lex();
3643
3644 if (getLexer().isNot(AsmToken::Identifier))
3645 return TokError("expected value identifier or .end_amd_kernel_code_t");
3646
3647 StringRef ID = getLexer().getTok().getIdentifier();
3648 Lex();
3649
3650 if (ID == ".end_amd_kernel_code_t")
3651 break;
3652
3653 if (ParseAMDKernelCodeTValue(ID, Header))
3654 return true;
3655 }
3656
3657 getTargetStreamer().EmitAMDKernelCodeT(Header);
3658
3659 return false;
3660}
3661
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003662bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
3663 if (getLexer().isNot(AsmToken::Identifier))
3664 return TokError("expected symbol name");
3665
3666 StringRef KernelName = Parser.getTok().getString();
3667
3668 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
3669 ELF::STT_AMDGPU_HSA_KERNEL);
3670 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003671 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
3672 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003673 return false;
3674}
3675
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003676bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003677 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
3678 return Error(getParser().getTok().getLoc(),
3679 ".amd_amdgpu_isa directive is not available on non-amdgcn "
3680 "architectures");
3681 }
3682
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003683 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
3684
3685 std::string ISAVersionStringFromSTI;
3686 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
3687 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
3688
3689 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
3690 return Error(getParser().getTok().getLoc(),
3691 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
3692 "arguments specified through the command line");
3693 }
3694
3695 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
3696 Lex();
3697
3698 return false;
3699}
3700
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003701bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
Scott Linderf5b36e52018-12-12 19:39:27 +00003702 const char *AssemblerDirectiveBegin;
3703 const char *AssemblerDirectiveEnd;
3704 std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
3705 AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())
3706 ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
3707 HSAMD::V3::AssemblerDirectiveEnd)
3708 : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
3709 HSAMD::AssemblerDirectiveEnd);
3710
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003711 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
3712 return Error(getParser().getTok().getLoc(),
Scott Linderf5b36e52018-12-12 19:39:27 +00003713 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003714 "not available on non-amdhsa OSes")).str());
3715 }
3716
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003717 std::string HSAMetadataString;
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003718 if (ParseToEndDirective(AssemblerDirectiveBegin, AssemblerDirectiveEnd,
3719 HSAMetadataString))
3720 return true;
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003721
Scott Linderf5b36e52018-12-12 19:39:27 +00003722 if (IsaInfo::hasCodeObjectV3(&getSTI())) {
3723 if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
3724 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3725 } else {
3726 if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
3727 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3728 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003729
3730 return false;
3731}
3732
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003733/// Common code to parse out a block of text (typically YAML) between start and
3734/// end directives.
3735bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin,
3736 const char *AssemblerDirectiveEnd,
3737 std::string &CollectString) {
3738
3739 raw_string_ostream CollectStream(CollectString);
3740
3741 getLexer().setSkipSpace(false);
3742
3743 bool FoundEnd = false;
3744 while (!getLexer().is(AsmToken::Eof)) {
3745 while (getLexer().is(AsmToken::Space)) {
3746 CollectStream << getLexer().getTok().getString();
3747 Lex();
3748 }
3749
3750 if (getLexer().is(AsmToken::Identifier)) {
3751 StringRef ID = getLexer().getTok().getIdentifier();
3752 if (ID == AssemblerDirectiveEnd) {
3753 Lex();
3754 FoundEnd = true;
3755 break;
3756 }
3757 }
3758
3759 CollectStream << Parser.parseStringToEndOfStatement()
3760 << getContext().getAsmInfo()->getSeparatorString();
3761
3762 Parser.eatToEndOfStatement();
3763 }
3764
3765 getLexer().setSkipSpace(true);
3766
3767 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
3768 return TokError(Twine("expected directive ") +
3769 Twine(AssemblerDirectiveEnd) + Twine(" not found"));
3770 }
3771
3772 CollectStream.flush();
3773 return false;
3774}
3775
3776/// Parse the assembler directive for new MsgPack-format PAL metadata.
3777bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() {
3778 std::string String;
3779 if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin,
3780 AMDGPU::PALMD::AssemblerDirectiveEnd, String))
3781 return true;
3782
3783 auto PALMetadata = getTargetStreamer().getPALMetadata();
3784 if (!PALMetadata->setFromString(String))
3785 return Error(getParser().getTok().getLoc(), "invalid PAL metadata");
3786 return false;
3787}
3788
3789/// Parse the assembler directive for old linear-format PAL metadata.
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003790bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003791 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
3792 return Error(getParser().getTok().getLoc(),
3793 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
3794 "not available on non-amdpal OSes")).str());
3795 }
3796
Tim Renoufd737b552019-03-20 17:42:00 +00003797 auto PALMetadata = getTargetStreamer().getPALMetadata();
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003798 PALMetadata->setLegacy();
Tim Renouf72800f02017-10-03 19:03:52 +00003799 for (;;) {
Tim Renoufd737b552019-03-20 17:42:00 +00003800 uint32_t Key, Value;
3801 if (ParseAsAbsoluteExpression(Key)) {
3802 return TokError(Twine("invalid value in ") +
3803 Twine(PALMD::AssemblerDirective));
3804 }
3805 if (getLexer().isNot(AsmToken::Comma)) {
3806 return TokError(Twine("expected an even number of values in ") +
3807 Twine(PALMD::AssemblerDirective));
3808 }
3809 Lex();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003810 if (ParseAsAbsoluteExpression(Value)) {
3811 return TokError(Twine("invalid value in ") +
3812 Twine(PALMD::AssemblerDirective));
3813 }
Tim Renoufd737b552019-03-20 17:42:00 +00003814 PALMetadata->setRegister(Key, Value);
Tim Renouf72800f02017-10-03 19:03:52 +00003815 if (getLexer().isNot(AsmToken::Comma))
3816 break;
3817 Lex();
3818 }
Tim Renouf72800f02017-10-03 19:03:52 +00003819 return false;
3820}
3821
Tom Stellard45bb48e2015-06-13 03:28:10 +00003822bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00003823 StringRef IDVal = DirectiveID.getString();
3824
Scott Linder1e8c2c72018-06-21 19:38:56 +00003825 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
3826 if (IDVal == ".amdgcn_target")
3827 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00003828
Scott Linder1e8c2c72018-06-21 19:38:56 +00003829 if (IDVal == ".amdhsa_kernel")
3830 return ParseDirectiveAMDHSAKernel();
Scott Linderf5b36e52018-12-12 19:39:27 +00003831
3832 // TODO: Restructure/combine with PAL metadata directive.
3833 if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
3834 return ParseDirectiveHSAMetadata();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003835 } else {
3836 if (IDVal == ".hsa_code_object_version")
3837 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00003838
Scott Linder1e8c2c72018-06-21 19:38:56 +00003839 if (IDVal == ".hsa_code_object_isa")
3840 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00003841
Scott Linder1e8c2c72018-06-21 19:38:56 +00003842 if (IDVal == ".amd_kernel_code_t")
3843 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003844
Scott Linder1e8c2c72018-06-21 19:38:56 +00003845 if (IDVal == ".amdgpu_hsa_kernel")
3846 return ParseDirectiveAMDGPUHsaKernel();
3847
3848 if (IDVal == ".amd_amdgpu_isa")
3849 return ParseDirectiveISAVersion();
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003850
Scott Linderf5b36e52018-12-12 19:39:27 +00003851 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
3852 return ParseDirectiveHSAMetadata();
3853 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003854
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003855 if (IDVal == PALMD::AssemblerDirectiveBegin)
3856 return ParseDirectivePALMetadataBegin();
3857
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003858 if (IDVal == PALMD::AssemblerDirective)
3859 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00003860
Tom Stellard45bb48e2015-06-13 03:28:10 +00003861 return true;
3862}
3863
Matt Arsenault68802d32015-11-05 03:11:27 +00003864bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
3865 unsigned RegNo) const {
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003866
3867 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
3868 R.isValid(); ++R) {
3869 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003870 return isGFX9() || isGFX10();
3871 }
3872
3873 // GFX10 has 2 more SGPRs 104 and 105.
3874 for (MCRegAliasIterator R(AMDGPU::SGPR104_SGPR105, &MRI, true);
3875 R.isValid(); ++R) {
3876 if (*R == RegNo)
3877 return hasSGPR104_SGPR105();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003878 }
3879
3880 switch (RegNo) {
3881 case AMDGPU::TBA:
3882 case AMDGPU::TBA_LO:
3883 case AMDGPU::TBA_HI:
3884 case AMDGPU::TMA:
3885 case AMDGPU::TMA_LO:
3886 case AMDGPU::TMA_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003887 return !isGFX9() && !isGFX10();
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00003888 case AMDGPU::XNACK_MASK:
3889 case AMDGPU::XNACK_MASK_LO:
3890 case AMDGPU::XNACK_MASK_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003891 return !isCI() && !isSI() && !isGFX10() && hasXNACK();
3892 case AMDGPU::SGPR_NULL:
3893 return isGFX10();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003894 default:
3895 break;
3896 }
3897
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00003898 if (isInlineValue(RegNo))
3899 return !isCI() && !isSI() && !isVI();
3900
Matt Arsenault3b159672015-12-01 20:31:08 +00003901 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00003902 return true;
3903
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003904 if (isSI() || isGFX10()) {
3905 // No flat_scr on SI.
3906 // On GFX10 flat scratch is not a valid register operand and can only be
3907 // accessed with s_setreg/s_getreg.
Matt Arsenault3b159672015-12-01 20:31:08 +00003908 switch (RegNo) {
3909 case AMDGPU::FLAT_SCR:
3910 case AMDGPU::FLAT_SCR_LO:
3911 case AMDGPU::FLAT_SCR_HI:
3912 return false;
3913 default:
3914 return true;
3915 }
3916 }
3917
Matt Arsenault68802d32015-11-05 03:11:27 +00003918 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
3919 // SI/CI have.
3920 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
3921 R.isValid(); ++R) {
3922 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003923 return hasSGPR102_SGPR103();
Matt Arsenault68802d32015-11-05 03:11:27 +00003924 }
3925
3926 return true;
3927}
3928
Alex Bradbury58eba092016-11-01 16:32:05 +00003929OperandMatchResultTy
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003930AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic,
3931 OperandMode Mode) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003932 // Try to parse with a custom parser
3933 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3934
3935 // If we successfully parsed the operand or if there as an error parsing,
3936 // we are done.
3937 //
3938 // If we are parsing after we reach EndOfStatement then this means we
3939 // are appending default values to the Operands list. This is only done
3940 // by custom parser, so we shouldn't continue on to the generic parsing.
Sam Kolton1bdcef72016-05-23 09:59:02 +00003941 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00003942 getLexer().is(AsmToken::EndOfStatement))
3943 return ResTy;
3944
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003945 if (Mode == OperandMode_NSA && getLexer().is(AsmToken::LBrac)) {
3946 unsigned Prefix = Operands.size();
3947 SMLoc LBraceLoc = getTok().getLoc();
3948 Parser.Lex(); // eat the '['
3949
3950 for (;;) {
3951 ResTy = parseReg(Operands);
3952 if (ResTy != MatchOperand_Success)
3953 return ResTy;
3954
3955 if (getLexer().is(AsmToken::RBrac))
3956 break;
3957
3958 if (getLexer().isNot(AsmToken::Comma))
3959 return MatchOperand_ParseFail;
3960 Parser.Lex();
3961 }
3962
3963 if (Operands.size() - Prefix > 1) {
3964 Operands.insert(Operands.begin() + Prefix,
3965 AMDGPUOperand::CreateToken(this, "[", LBraceLoc));
3966 Operands.push_back(AMDGPUOperand::CreateToken(this, "]",
3967 getTok().getLoc()));
3968 }
3969
3970 Parser.Lex(); // eat the ']'
3971 return MatchOperand_Success;
3972 }
3973
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00003974 return parseRegOrImm(Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003975}
3976
Sam Kolton05ef1c92016-06-03 10:27:37 +00003977StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
3978 // Clear any forced encodings from the previous instruction.
3979 setForcedEncodingSize(0);
3980 setForcedDPP(false);
3981 setForcedSDWA(false);
3982
3983 if (Name.endswith("_e64")) {
3984 setForcedEncodingSize(64);
3985 return Name.substr(0, Name.size() - 4);
3986 } else if (Name.endswith("_e32")) {
3987 setForcedEncodingSize(32);
3988 return Name.substr(0, Name.size() - 4);
3989 } else if (Name.endswith("_dpp")) {
3990 setForcedDPP(true);
3991 return Name.substr(0, Name.size() - 4);
3992 } else if (Name.endswith("_sdwa")) {
3993 setForcedSDWA(true);
3994 return Name.substr(0, Name.size() - 5);
3995 }
3996 return Name;
3997}
3998
Tom Stellard45bb48e2015-06-13 03:28:10 +00003999bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
4000 StringRef Name,
4001 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004002 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00004003 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004004 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00004005
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004006 bool IsMIMG = Name.startswith("image_");
4007
Tom Stellard45bb48e2015-06-13 03:28:10 +00004008 while (!getLexer().is(AsmToken::EndOfStatement)) {
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004009 OperandMode Mode = OperandMode_Default;
4010 if (IsMIMG && isGFX10() && Operands.size() == 2)
4011 Mode = OperandMode_NSA;
4012 OperandMatchResultTy Res = parseOperand(Operands, Name, Mode);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004013
4014 // Eat the comma or space if there is one.
4015 if (getLexer().is(AsmToken::Comma))
4016 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00004017
Tom Stellard45bb48e2015-06-13 03:28:10 +00004018 switch (Res) {
4019 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004020 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00004021 Error(getLexer().getLoc(), "failed parsing operand.");
4022 while (!getLexer().is(AsmToken::EndOfStatement)) {
4023 Parser.Lex();
4024 }
4025 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004026 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00004027 Error(getLexer().getLoc(), "not a valid operand.");
4028 while (!getLexer().is(AsmToken::EndOfStatement)) {
4029 Parser.Lex();
4030 }
4031 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004032 }
4033 }
4034
Tom Stellard45bb48e2015-06-13 03:28:10 +00004035 return false;
4036}
4037
4038//===----------------------------------------------------------------------===//
4039// Utility functions
4040//===----------------------------------------------------------------------===//
4041
Alex Bradbury58eba092016-11-01 16:32:05 +00004042OperandMatchResultTy
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004043AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &IntVal) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004044
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004045 if (!trySkipId(Prefix, AsmToken::Colon))
4046 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004047
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004048 return parseExpr(IntVal) ? MatchOperand_Success : MatchOperand_ParseFail;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004049}
4050
Alex Bradbury58eba092016-11-01 16:32:05 +00004051OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004052AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004053 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004054 bool (*ConvertResult)(int64_t&)) {
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004055 SMLoc S = getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004056 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004057
Alex Bradbury58eba092016-11-01 16:32:05 +00004058 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004059 if (Res != MatchOperand_Success)
4060 return Res;
4061
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004062 if (ConvertResult && !ConvertResult(Value)) {
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004063 Error(S, "invalid " + StringRef(Prefix) + " value.");
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004064 }
4065
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004066 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004067 return MatchOperand_Success;
4068}
4069
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004070OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
4071 const char *Prefix,
4072 OperandVector &Operands,
4073 AMDGPUOperand::ImmTy ImmTy,
4074 bool (*ConvertResult)(int64_t&)) {
4075 StringRef Name = Parser.getTok().getString();
4076 if (!Name.equals(Prefix))
4077 return MatchOperand_NoMatch;
4078
4079 Parser.Lex();
4080 if (getLexer().isNot(AsmToken::Colon))
4081 return MatchOperand_ParseFail;
4082
4083 Parser.Lex();
4084 if (getLexer().isNot(AsmToken::LBrac))
4085 return MatchOperand_ParseFail;
4086 Parser.Lex();
4087
4088 unsigned Val = 0;
4089 SMLoc S = Parser.getTok().getLoc();
4090
4091 // FIXME: How to verify the number of elements matches the number of src
4092 // operands?
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00004093 for (int I = 0; I < 4; ++I) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004094 if (I != 0) {
4095 if (getLexer().is(AsmToken::RBrac))
4096 break;
4097
4098 if (getLexer().isNot(AsmToken::Comma))
4099 return MatchOperand_ParseFail;
4100 Parser.Lex();
4101 }
4102
4103 if (getLexer().isNot(AsmToken::Integer))
4104 return MatchOperand_ParseFail;
4105
4106 int64_t Op;
4107 if (getParser().parseAbsoluteExpression(Op))
4108 return MatchOperand_ParseFail;
4109
4110 if (Op != 0 && Op != 1)
4111 return MatchOperand_ParseFail;
4112 Val |= (Op << I);
4113 }
4114
4115 Parser.Lex();
4116 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
4117 return MatchOperand_Success;
4118}
4119
// Parse a named bit modifier: "<Name>" sets the bit to 1, "no<Name>" sets
// it to 0, and an absent modifier (end of statement) defaults to 0. The
// result is appended to Operands as an immediate of type ImmTy.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        // r128 and a16 are mutually exclusive per generation: report (but
        // do not fail) when used on an unsupported subtarget.
        if (Tok == "r128" && isGFX9())
          Error(S, "r128 modifier is not supported on this GPU");
        if (Tok == "a16" && !isGFX9())
          Error(S, "a16 modifier is not supported on this GPU");
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  // The DLC bit only exists on GFX10; reject it elsewhere.
  if (!isGFX10() && ImmTy == AMDGPUOperand::ImmTyDLC)
    return MatchOperand_ParseFail;

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
4158
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004159static void addOptionalImmOperand(
4160 MCInst& Inst, const OperandVector& Operands,
4161 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
4162 AMDGPUOperand::ImmTy ImmT,
4163 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004164 auto i = OptionalIdx.find(ImmT);
4165 if (i != OptionalIdx.end()) {
4166 unsigned Idx = i->second;
4167 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
4168 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00004169 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004170 }
4171}
4172
Alex Bradbury58eba092016-11-01 16:32:05 +00004173OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00004174AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00004175 if (getLexer().isNot(AsmToken::Identifier)) {
4176 return MatchOperand_NoMatch;
4177 }
4178 StringRef Tok = Parser.getTok().getString();
4179 if (Tok != Prefix) {
4180 return MatchOperand_NoMatch;
4181 }
4182
4183 Parser.Lex();
4184 if (getLexer().isNot(AsmToken::Colon)) {
4185 return MatchOperand_ParseFail;
4186 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00004187
Sam Kolton3025e7f2016-04-26 13:33:56 +00004188 Parser.Lex();
4189 if (getLexer().isNot(AsmToken::Identifier)) {
4190 return MatchOperand_ParseFail;
4191 }
4192
4193 Value = Parser.getTok().getString();
4194 return MatchOperand_Success;
4195}
4196
// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Dfmt = 0, Nfmt = 0;
  // dfmt and nfmt can appear in either order, and each is optional.
  bool GotDfmt = false, GotNfmt = false;
  while (!GotDfmt || !GotNfmt) {
    if (!GotDfmt) {
      auto Res = parseIntWithPrefix("dfmt", Dfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // dfmt is a 4-bit field.
        if (Dfmt >= 16) {
          Error(Parser.getTok().getLoc(), "out of range dfmt");
          return MatchOperand_ParseFail;
        }
        GotDfmt = true;
        Parser.Lex();
        continue;
      }
    }
    if (!GotNfmt) {
      auto Res = parseIntWithPrefix("nfmt", Nfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // nfmt is a 3-bit field.
        if (Nfmt >= 8) {
          Error(Parser.getTok().getLoc(), "out of range nfmt");
          return MatchOperand_ParseFail;
        }
        GotNfmt = true;
        Parser.Lex();
        continue;
      }
    }
    // Neither keyword matched on this iteration: stop scanning.
    break;
  }
  if (!GotDfmt && !GotNfmt)
    return MatchOperand_NoMatch;
  // Pack both fields into the joint format immediate: dfmt in bits [3:0],
  // nfmt in bits [6:4].
  auto Format = Dfmt | Nfmt << 4;
  Operands.push_back(
    AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
  return MatchOperand_Success;
}
4243
Tom Stellard45bb48e2015-06-13 03:28:10 +00004244//===----------------------------------------------------------------------===//
4245// ds
4246//===----------------------------------------------------------------------===//
4247
Tom Stellard45bb48e2015-06-13 03:28:10 +00004248void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
4249 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004250 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004251
4252 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4253 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4254
4255 // Add the register arguments
4256 if (Op.isReg()) {
4257 Op.addRegOperands(Inst, 1);
4258 continue;
4259 }
4260
4261 // Handle optional arguments
4262 OptionalIdx[Op.getImmTy()] = i;
4263 }
4264
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004265 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
4266 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004267 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004268
Tom Stellard45bb48e2015-06-13 03:28:10 +00004269 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
4270}
4271
// Convert parsed DS operands into an MCInst. IsGdsHardcoded indicates the
// opcode implies gds; a literal "gds" token in the operand list also sets
// it, in which case no separate gds immediate is emitted.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 uses a specially-formatted offset (swizzle pattern);
  // all other DS opcodes take a plain byte offset.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx10 ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx6_gfx7 ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
4307
// Convert parsed export (exp) operands into an MCInst and compute the "en"
// enable mask from which of the four sources are present (i.e. not "off").
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // MCInst index of each of the four source operand slots.
  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" placeholders become NoRegister and stay disabled in the mask.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // The "done" token produces no explicit MCInst operand here.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // Compressed export: the parsed src2 takes the second operand slot,
    // and the upper two source slots are cleared.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      // A compressed source slot enables two mask bits.
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00004368
4369//===----------------------------------------------------------------------===//
4370// s_waitcnt
4371//===----------------------------------------------------------------------===//
4372
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004373static bool
4374encodeCnt(
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004375 const AMDGPU::IsaVersion ISA,
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004376 int64_t &IntVal,
4377 int64_t CntVal,
4378 bool Saturate,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004379 unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
4380 unsigned (*decode)(const IsaVersion &Version, unsigned))
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004381{
4382 bool Failed = false;
4383
4384 IntVal = encode(ISA, IntVal, CntVal);
4385 if (CntVal != decode(ISA, IntVal)) {
4386 if (Saturate) {
4387 IntVal = encode(ISA, IntVal, -1);
4388 } else {
4389 Failed = true;
4390 }
4391 }
4392 return Failed;
4393}
4394
// Parse one "<name>(<value>)" counter term of an s_waitcnt expression,
// e.g. vmcnt(0), and fold its encoding into IntVal. A "_sat" suffix on the
// counter name clamps out-of-range values instead of reporting an error.
// Returns true on error (LLVM AsmParser convention).
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  bool Failed = true;
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  // Consume an optional '&' or ',' separator, but only when it is followed
  // by another counter name (identifier).
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
4443
Alex Bradbury58eba092016-11-01 16:32:05 +00004444OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004445AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004446 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00004447 int64_t Waitcnt = getWaitcntBitMask(ISA);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004448 SMLoc S = Parser.getTok().getLoc();
4449
4450 switch(getLexer().getKind()) {
4451 default: return MatchOperand_ParseFail;
4452 case AsmToken::Integer:
4453 // The operand can be an integer value.
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004454 if (getParser().parseAbsoluteExpression(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00004455 return MatchOperand_ParseFail;
4456 break;
4457
4458 case AsmToken::Identifier:
4459 do {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004460 if (parseCnt(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00004461 return MatchOperand_ParseFail;
4462 } while(getLexer().isNot(AsmToken::EndOfStatement));
4463 break;
4464 }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004465 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004466 return MatchOperand_Success;
4467}
4468
// Parse the "hwreg(<id> [, <offset>, <width>])" construct. The register id
// may be a symbolic name (limited to names valid on the current subtarget)
// or an integer; Offset and Width are optional and keep the caller's
// defaults when omitted. Returns true on syntax error.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    // Restrict the symbolic name table to registers that exist on the
    // current generation; Id stays ID_UNKNOWN_ if the name is not found.
    int Last = ID_SYMBOLIC_LAST_;
    if (isSI() || isCI() || isVI())
      Last = ID_SYMBOLIC_FIRST_GFX9_;
    else if (isGFX9())
      Last = ID_SYMBOLIC_FIRST_GFX10_;
    for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: "hwreg(<id>)" with no offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
4535
// Parse an s_getreg/s_setreg operand: either a raw 16-bit immediate or a
// hwreg(...) construct, validated and packed into the simm16 encoding
// (id | offset | width-1).
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    int64_t Offset = OFFSET_DEFAULT_;
    int64_t Width = WIDTH_M1_DEFAULT_ + 1;
    if (parseHwregConstruct(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    // Validate each field; like the integer case above, errors are
    // reported but an operand is still created.
    if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
      if (HwReg.IsSymbolic)
        Error(S, "invalid symbolic name of hardware register");
      else
        Error(S, "invalid code of hardware register: only 6-bit values are legal");
    }
    if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    // Pack id, offset, and (width - 1) into the simm16 encoding.
    Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
4578
// s_waitcnt accepts any immediate: both raw integers and counter
// expressions are folded to a plain imm by the parser.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
4582
// True if this operand was parsed as a hwreg(...) immediate.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
4586
// Parse the "sendmsg(<msg> [, <op> [, <stream id>]])" construct into its
// components. Msg and Operation may be symbolic names or integers; the
// stream id is integer-only and optional. Returns true on a hard syntax
// error; semantic validation is left to the caller (parseSendMsgOp).
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_GS_ALLOC_REQ:
        // GS_ALLOC_REQ is not recognized on SI/CI/VI.
        if (isSI() || isCI() || isVI())
          continue;
        break;
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE:
      case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second consecutive integer re-parses into Msg.Id and
    // marks it unknown — presumably to reject forms like "sendmsg(1 2)";
    // confirm intent.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages without an operation field must be closed right away.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    // Pick the operation name table matching the message kind.
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
4687
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004688OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
4689 if (getLexer().getKind() != AsmToken::Identifier)
4690 return MatchOperand_NoMatch;
4691
4692 StringRef Str = Parser.getTok().getString();
4693 int Slot = StringSwitch<int>(Str)
4694 .Case("p10", 0)
4695 .Case("p20", 1)
4696 .Case("p0", 2)
4697 .Default(-1);
4698
4699 SMLoc S = Parser.getTok().getLoc();
4700 if (Slot == -1)
4701 return MatchOperand_ParseFail;
4702
4703 Parser.Lex();
4704 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
4705 AMDGPUOperand::ImmTyInterpSlot));
4706 return MatchOperand_Success;
4707}
4708
// Parse an interpolation attribute operand "attr<N>.<c>" where <c> is one
// of x/y/z/w (e.g. "attr0.x"). Appends two immediates to Operands: the
// attribute index and the channel.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the trailing ".<c>" pair of characters.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the channel suffix and the "attr" prefix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    // Report the error but still return success (operands not created).
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
4748
// Report an "invalid exp target" diagnostic at the current token location.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
4752
// Translate an export target name (Str) to its hardware encoding (Val):
//   null -> 9, mrtz -> 8, mrt0-7 -> 0-7, pos0-3 -> 12-15 (pos4 on GFX10
//   -> 16), prim (GFX10) -> 20, param0-31 -> 32-63.
// Out-of-range components report an error via errorExpTgt() but still
// return MatchOperand_Success.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    // pos4 is only available on GFX10.
    if (Val > 4 || (Val == 4 && !isGFX10()))
      errorExpTgt();

    Val += 12;
    return MatchOperand_Success;
  }

  if (isGFX10() && Str == "prim") {
    Val = 20;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    Val += 32;
    return MatchOperand_Success;
  }

  // "invalid_target_<N>" is accepted syntactically but always diagnosed.
  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
4816
4817OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
4818 uint8_t Val;
4819 StringRef Str = Parser.getTok().getString();
4820
4821 auto Res = parseExpTgtImpl(Str, Val);
4822 if (Res != MatchOperand_Success)
4823 return Res;
4824
4825 SMLoc S = Parser.getTok().getLoc();
4826 Parser.Lex();
4827
4828 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
4829 AMDGPUOperand::ImmTyExpTgt));
4830 return MatchOperand_Success;
4831}
4832
// Parse an s_sendmsg operand: either a raw 16-bit immediate or a
// sendmsg(...) construct, which is validated and packed into the simm16
// encoding (message id | operation | stream id).
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    do {
      // Validate and encode message ID.
      // GS_ALLOC_REQ is rejected on SI/CI/VI.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || (Msg.Id == ID_GS_ALLOC_REQ && !isSI() && !isCI() && !isVI())
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false); // Single-pass validation; 'break' above aborts encoding.
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
4912
// True if this operand was parsed as a sendmsg(...) immediate.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
4916
Tom Stellard45bb48e2015-06-13 03:28:10 +00004917//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004918// parser helpers
4919//===----------------------------------------------------------------------===//
4920
4921bool
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004922AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const {
4923 return Token.is(AsmToken::Identifier) && Token.getString() == Id;
4924}
4925
// True if the current token is the identifier Id.
bool
AMDGPUAsmParser::isId(const StringRef Id) const {
  return isId(getToken(), Id);
}
4930
// True if the current token has the given kind.
bool
AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const {
  return getTokenKind() == Kind;
}
4935
4936bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004937AMDGPUAsmParser::trySkipId(const StringRef Id) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004938 if (isId(Id)) {
4939 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004940 return true;
4941 }
4942 return false;
4943}
4944
4945bool
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004946AMDGPUAsmParser::trySkipId(const StringRef Id, const AsmToken::TokenKind Kind) {
4947 if (isId(Id) && peekToken().is(Kind)) {
4948 lex();
4949 lex();
4950 return true;
4951 }
4952 return false;
4953}
4954
4955bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004956AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004957 if (isToken(Kind)) {
4958 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004959 return true;
4960 }
4961 return false;
4962}
4963
4964bool
4965AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
4966 const StringRef ErrMsg) {
4967 if (!trySkipToken(Kind)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004968 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004969 return false;
4970 }
4971 return true;
4972}
4973
// Parse an absolute expression into Imm. Returns true on success.
// Note: parseAbsoluteExpression() returns true on *failure*, hence the flip.
bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}
4978
4979bool
4980AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004981 if (isToken(AsmToken::String)) {
4982 Val = getToken().getStringContents();
4983 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004984 return true;
4985 } else {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004986 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004987 return false;
4988 }
4989}
4990
// Return the current lookahead token (by value).
AsmToken
AMDGPUAsmParser::getToken() const {
  return Parser.getTok();
}
4995
// Return the token after the current one without consuming anything.
AsmToken
AMDGPUAsmParser::peekToken() {
  return getLexer().peekTok();
}
5000
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00005001void
5002AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) {
5003 auto TokCount = getLexer().peekTokens(Tokens);
5004
5005 for (auto Idx = TokCount; Idx < Tokens.size(); ++Idx)
5006 Tokens[Idx] = AsmToken(AsmToken::Error, "");
5007}
5008
// Kind of the current lookahead token.
AsmToken::TokenKind
AMDGPUAsmParser::getTokenKind() const {
  return getLexer().getKind();
}
5013
// Source location of the current lookahead token.
SMLoc
AMDGPUAsmParser::getLoc() const {
  return getToken().getLoc();
}
5018
// Spelling of the current lookahead token.
StringRef
AMDGPUAsmParser::getTokenStr() const {
  return getToken().getString();
}
5023
// Consume the current token and advance to the next one.
void
AMDGPUAsmParser::lex() {
  Parser.Lex();
}
5028
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005029//===----------------------------------------------------------------------===//
5030// swizzle
5031//===----------------------------------------------------------------------===//
5032
5033LLVM_READNONE
5034static unsigned
5035encodeBitmaskPerm(const unsigned AndMask,
5036 const unsigned OrMask,
5037 const unsigned XorMask) {
5038 using namespace llvm::AMDGPU::Swizzle;
5039
5040 return BITMASK_PERM_ENC |
5041 (AndMask << BITMASK_AND_SHIFT) |
5042 (OrMask << BITMASK_OR_SHIFT) |
5043 (XorMask << BITMASK_XOR_SHIFT);
5044}
5045
5046bool
5047AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
5048 const unsigned MinVal,
5049 const unsigned MaxVal,
5050 const StringRef ErrMsg) {
5051 for (unsigned i = 0; i < OpNum; ++i) {
5052 if (!skipToken(AsmToken::Comma, "expected a comma")){
5053 return false;
5054 }
5055 SMLoc ExprLoc = Parser.getTok().getLoc();
5056 if (!parseExpr(Op[i])) {
5057 return false;
5058 }
5059 if (Op[i] < MinVal || Op[i] > MaxVal) {
5060 Error(ExprLoc, ErrMsg);
5061 return false;
5062 }
5063 }
5064
5065 return true;
5066}
5067
5068bool
5069AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
5070 using namespace llvm::AMDGPU::Swizzle;
5071
5072 int64_t Lane[LANE_NUM];
5073 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
5074 "expected a 2-bit lane id")) {
5075 Imm = QUAD_PERM_ENC;
Stanislav Mekhanoshin266f1572019-03-11 16:49:32 +00005076 for (unsigned I = 0; I < LANE_NUM; ++I) {
5077 Imm |= Lane[I] << (LANE_SHIFT * I);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005078 }
5079 return true;
5080 }
5081 return false;
5082}
5083
5084bool
5085AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
5086 using namespace llvm::AMDGPU::Swizzle;
5087
5088 SMLoc S = Parser.getTok().getLoc();
5089 int64_t GroupSize;
5090 int64_t LaneIdx;
5091
5092 if (!parseSwizzleOperands(1, &GroupSize,
5093 2, 32,
5094 "group size must be in the interval [2,32]")) {
5095 return false;
5096 }
5097 if (!isPowerOf2_64(GroupSize)) {
5098 Error(S, "group size must be a power of two");
5099 return false;
5100 }
5101 if (parseSwizzleOperands(1, &LaneIdx,
5102 0, GroupSize - 1,
5103 "lane id must be in the interval [0,group size - 1]")) {
5104 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
5105 return true;
5106 }
5107 return false;
5108}
5109
5110bool
5111AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
5112 using namespace llvm::AMDGPU::Swizzle;
5113
5114 SMLoc S = Parser.getTok().getLoc();
5115 int64_t GroupSize;
5116
5117 if (!parseSwizzleOperands(1, &GroupSize,
5118 2, 32, "group size must be in the interval [2,32]")) {
5119 return false;
5120 }
5121 if (!isPowerOf2_64(GroupSize)) {
5122 Error(S, "group size must be a power of two");
5123 return false;
5124 }
5125
5126 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
5127 return true;
5128}
5129
5130bool
5131AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
5132 using namespace llvm::AMDGPU::Swizzle;
5133
5134 SMLoc S = Parser.getTok().getLoc();
5135 int64_t GroupSize;
5136
5137 if (!parseSwizzleOperands(1, &GroupSize,
5138 1, 16, "group size must be in the interval [1,16]")) {
5139 return false;
5140 }
5141 if (!isPowerOf2_64(GroupSize)) {
5142 Error(S, "group size must be a power of two");
5143 return false;
5144 }
5145
5146 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
5147 return true;
5148}
5149
5150bool
5151AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
5152 using namespace llvm::AMDGPU::Swizzle;
5153
5154 if (!skipToken(AsmToken::Comma, "expected a comma")) {
5155 return false;
5156 }
5157
5158 StringRef Ctl;
5159 SMLoc StrLoc = Parser.getTok().getLoc();
5160 if (!parseString(Ctl)) {
5161 return false;
5162 }
5163 if (Ctl.size() != BITMASK_WIDTH) {
5164 Error(StrLoc, "expected a 5-character mask");
5165 return false;
5166 }
5167
5168 unsigned AndMask = 0;
5169 unsigned OrMask = 0;
5170 unsigned XorMask = 0;
5171
5172 for (size_t i = 0; i < Ctl.size(); ++i) {
5173 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
5174 switch(Ctl[i]) {
5175 default:
5176 Error(StrLoc, "invalid mask");
5177 return false;
5178 case '0':
5179 break;
5180 case '1':
5181 OrMask |= Mask;
5182 break;
5183 case 'p':
5184 AndMask |= Mask;
5185 break;
5186 case 'i':
5187 AndMask |= Mask;
5188 XorMask |= Mask;
5189 break;
5190 }
5191 }
5192
5193 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
5194 return true;
5195}
5196
5197bool
5198AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
5199
5200 SMLoc OffsetLoc = Parser.getTok().getLoc();
5201
5202 if (!parseExpr(Imm)) {
5203 return false;
5204 }
5205 if (!isUInt<16>(Imm)) {
5206 Error(OffsetLoc, "expected a 16-bit offset");
5207 return false;
5208 }
5209 return true;
5210}
5211
5212bool
5213AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
5214 using namespace llvm::AMDGPU::Swizzle;
5215
5216 if (skipToken(AsmToken::LParen, "expected a left parentheses")) {
5217
5218 SMLoc ModeLoc = Parser.getTok().getLoc();
5219 bool Ok = false;
5220
5221 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
5222 Ok = parseSwizzleQuadPerm(Imm);
5223 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
5224 Ok = parseSwizzleBitmaskPerm(Imm);
5225 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
5226 Ok = parseSwizzleBroadcast(Imm);
5227 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
5228 Ok = parseSwizzleSwap(Imm);
5229 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
5230 Ok = parseSwizzleReverse(Imm);
5231 } else {
5232 Error(ModeLoc, "expected a swizzle mode");
5233 }
5234
5235 return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
5236 }
5237
5238 return false;
5239}
5240
5241OperandMatchResultTy
5242AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
5243 SMLoc S = Parser.getTok().getLoc();
5244 int64_t Imm = 0;
5245
5246 if (trySkipId("offset")) {
5247
5248 bool Ok = false;
5249 if (skipToken(AsmToken::Colon, "expected a colon")) {
5250 if (trySkipId("swizzle")) {
5251 Ok = parseSwizzleMacro(Imm);
5252 } else {
5253 Ok = parseSwizzleOffset(Imm);
5254 }
5255 }
5256
5257 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
5258
5259 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
5260 } else {
Dmitry Preobrazhenskyc5b0c172017-12-22 17:13:28 +00005261 // Swizzle "offset" operand is optional.
5262 // If it is omitted, try parsing other optional operands.
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00005263 return parseOptionalOpr(Operands);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005264 }
5265}
5266
// Returns true if this operand is a swizzle immediate.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
5271
5272//===----------------------------------------------------------------------===//
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00005273// VGPR Index Mode
5274//===----------------------------------------------------------------------===//
5275
// Parse the body of a "gpr_idx(...)" macro: a possibly-empty,
// comma-separated list of distinct VGPR index mode names, terminated by a
// closing parenthesis. Returns the accumulated mode bitmask (OFF for an
// empty list). On error, reports it and returns the bits parsed so far.
int64_t AMDGPUAsmParser::parseGPRIdxMacro() {

  using namespace llvm::AMDGPU::VGPRIndexMode;

  // "gpr_idx()" with no modes encodes OFF.
  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = Parser.getTok().getLoc();

    // Try each symbolic mode name; the matching one is consumed.
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      break;
    }

    // Each mode may appear at most once.
    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      break;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      break;
  }

  return Imm;
}
5319
5320OperandMatchResultTy
5321AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {
5322
5323 int64_t Imm = 0;
5324 SMLoc S = Parser.getTok().getLoc();
5325
5326 if (getLexer().getKind() == AsmToken::Identifier &&
5327 Parser.getTok().getString() == "gpr_idx" &&
5328 getLexer().peekTok().is(AsmToken::LParen)) {
5329
5330 Parser.Lex();
5331 Parser.Lex();
5332
5333 // If parse failed, trigger an error but do not return error code
5334 // to avoid excessive error messages.
5335 Imm = parseGPRIdxMacro();
5336
5337 } else {
5338 if (getParser().parseAbsoluteExpression(Imm))
5339 return MatchOperand_NoMatch;
5340 if (Imm < 0 || !isUInt<4>(Imm)) {
5341 Error(S, "invalid immediate: only 4-bit values are legal");
5342 }
5343 }
5344
5345 Operands.push_back(
5346 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode));
5347 return MatchOperand_Success;
5348}
5349
// Returns true if this operand is a VGPR index mode immediate.
bool AMDGPUOperand::isGPRIdxMode() const {
  return isImmTy(ImmTyGprIdxMode);
}
5353
5354//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005355// sopp branch targets
5356//===----------------------------------------------------------------------===//
5357
Alex Bradbury58eba092016-11-01 16:32:05 +00005358OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00005359AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
5360 SMLoc S = Parser.getTok().getLoc();
5361
5362 switch (getLexer().getKind()) {
5363 default: return MatchOperand_ParseFail;
5364 case AsmToken::Integer: {
5365 int64_t Imm;
5366 if (getParser().parseAbsoluteExpression(Imm))
5367 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005368 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00005369 return MatchOperand_Success;
5370 }
5371
5372 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005373 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00005374 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
5375 Parser.getTok().getString()), getContext()), S));
5376 Parser.Lex();
5377 return MatchOperand_Success;
5378 }
5379}
5380
5381//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005382// mubuf
5383//===----------------------------------------------------------------------===//
5384
// Default (absent) 'dlc' modifier: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDLC);
}
5388
// Default (absent) 'glc' modifier: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
5392
// Default (absent) 'slc' modifier: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
5396
// Convert parsed MUBUF operands into an MCInst. Registers and the immediate
// soffset are appended in parse order; named optional modifiers are
// collected by type and appended afterwards in the fixed encoding order.
// IsAtomic/IsAtomicReturn/IsLds select opcode-family-specific handling.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic,
                                   bool IsAtomicReturn,
                                   bool IsLds) {
  bool IsLdsOpcode = IsLds;
  bool HasLdsModifier = false;
  OptionalImmIndexMap OptionalIdx;
  // An atomic-return opcode must also be atomic.
  assert(IsAtomicReturn ? IsAtomic : true);
  unsigned FirstOperandIdx = 1;

  for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      // Insert a tied src for atomic return dst.
      // This cannot be postponed as subsequent calls to
      // addImmOperands rely on correct number of MC operands.
      if (IsAtomicReturn && i == FirstOperandIdx)
        Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Remember whether an explicit 'lds' modifier was present.
    HasLdsModifier |= Op.isLDS();

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // This is a workaround for an llvm quirk which may result in an
  // incorrect instruction selection. Lds and non-lds versions of
  // MUBUF instructions are identical except that lds versions
  // have mandatory 'lds' modifier. However this modifier follows
  // optional modifiers and llvm asm matcher regards this 'lds'
  // modifier as an optional one. As a result, an lds version
  // of opcode may be selected even if it has no 'lds' modifier.
  if (IsLdsOpcode && !HasLdsModifier) {
    int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
    if (NoLdsOpcode != -1) { // Got lds version - correct it.
      Inst.setOpcode(NoLdsOpcode);
      IsLdsOpcode = false;
    }
  }

  // Append optional modifiers in encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);

  if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  }

  if (isGFX10())
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
}
5469
// Convert parsed MTBUF operands into an MCInst. Registers and the immediate
// soffset are appended in parse order; named optional modifiers are
// collected by type and appended afterwards in the fixed encoding order.
void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Append optional modifiers in encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);

  if (isGFX10())
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
}
5509
Tom Stellard45bb48e2015-06-13 03:28:10 +00005510//===----------------------------------------------------------------------===//
5511// mimg
5512//===----------------------------------------------------------------------===//
5513
// Convert parsed MIMG operands into an MCInst. Defs come first; for atomics
// the dst register is duplicated as a tied src. Optional modifiers are
// collected by type and appended in encoding order, which differs between
// GFX10 and earlier targets (dim/dlc are GFX10-only, da is pre-GFX10).
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else if (!Op.isToken()) {
      llvm_unreachable("unexpected operand type");
    }
  }

  bool IsGFX10 = isGFX10();

  // Append optional modifiers in encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  if (IsGFX10)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDim, -1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  if (IsGFX10)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  if (!IsGFX10)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
5560
5561void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005562 cvtMIMG(Inst, Operands, true);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005563}
5564
Tom Stellard45bb48e2015-06-13 03:28:10 +00005565//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00005566// smrd
5567//===----------------------------------------------------------------------===//
5568
// Returns true if this is an immediate that fits in an 8-bit SMRD offset.
bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}
5572
// Returns true if this is an immediate that fits in a 20-bit SMRD offset.
bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}
5576
Tom Stellard217361c2015-08-06 19:28:38 +00005577bool AMDGPUOperand::isSMRDLiteralOffset() const {
5578 // 32-bit literals are only supported on CI and we only want to use them
5579 // when the offset is > 8-bits.
5580 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
5581}
5582
// Default (absent) 8-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5586
// Default (absent) 20-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5590
// Default (absent) SMRD literal offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5594
// Default (absent) unsigned 12-bit offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5598
// Default (absent) signed 13-bit offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5602
Tom Stellard217361c2015-08-06 19:28:38 +00005603//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005604// vop3
5605//===----------------------------------------------------------------------===//
5606
// Map an omod "mul" factor (1, 2 or 4) to its encoded value (0, 1 or 2).
// Returns false for any other factor, leaving Mul unchanged.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
5614
// Map an omod "div" factor to its encoded value: 1 -> 0, 2 -> 3.
// Returns false for any other factor, leaving Div unchanged.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
5628
// Map a DPP bound_ctrl value to its encoding: 0 -> 1, -1 -> 0.
// Returns false for any other value, leaving BoundCtrl unchanged.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
5642
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005643// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00005644static const OptionalOperand AMDGPUOptionalOperandTable[] = {
5645 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
5646 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
5647 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
5648 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
5649 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
5650 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005651 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005652 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00005653 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005654 {"dlc", AMDGPUOperand::ImmTyDLC, true, nullptr},
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005655 {"format", AMDGPUOperand::ImmTyFORMAT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005656 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
5657 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
5658 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00005659 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005660 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005661 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
5662 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
5663 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
5664 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
Ryan Taylor1f334d02018-08-28 15:07:30 +00005665 {"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
5666 {"a16", AMDGPUOperand::ImmTyR128A16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005667 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00005668 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005669 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005670 {"dim", AMDGPUOperand::ImmTyDim, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005671 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
5672 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
5673 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00005674 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
5675 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
5676 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005677 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00005678 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005679 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005680 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
5681 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
5682 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
5683 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005684};
Tom Stellard45bb48e2015-06-13 03:28:10 +00005685
Alex Bradbury58eba092016-11-01 16:32:05 +00005686OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00005687 unsigned size = Operands.size();
5688 assert(size > 0);
5689
5690 OperandMatchResultTy res = parseOptionalOpr(Operands);
5691
5692 // This is a hack to enable hardcoded mandatory operands which follow
5693 // optional operands.
5694 //
5695 // Current design assumes that all operands after the first optional operand
5696 // are also optional. However implementation of some instructions violates
5697 // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
5698 //
5699 // To alleviate this problem, we have to (implicitly) parse extra operands
5700 // to make sure autogenerated parser of custom operands never hit hardcoded
5701 // mandatory operands.
5702
5703 if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
5704
5705 // We have parsed the first optional operand.
5706 // Parse as many operands as necessary to skip all mandatory operands.
5707
5708 for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
5709 if (res != MatchOperand_Success ||
5710 getLexer().is(AsmToken::EndOfStatement)) break;
5711 if (getLexer().is(AsmToken::Comma)) Parser.Lex();
5712 res = parseOptionalOpr(Operands);
5713 }
5714 }
5715
5716 return res;
5717}
5718
// Try each entry of AMDGPUOptionalOperandTable against the current token
// stream, dispatching to the appropriate specialized parser for the entry's
// operand type. Returns the first non-NoMatch result, or NoMatch if no
// table entry applies at the current position.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      // Bare bit modifiers like 'glc' have no value.
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else if (Op.Type == AMDGPUOperand::ImmTyDim) {
      res = parseDim(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT && !isGFX10()) {
      // On GFX10 'format' is handled by the generic prefixed-int path below.
      res = parseDfmtNfmt(Operands);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
5752
Matt Arsenault12c53892016-11-15 19:58:54 +00005753OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005754 StringRef Name = Parser.getTok().getString();
5755 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00005756 return parseIntWithPrefix("mul", Operands,
5757 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005758 }
Matt Arsenault12c53892016-11-15 19:58:54 +00005759
5760 if (Name == "div") {
5761 return parseIntWithPrefix("div", Operands,
5762 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
5763 }
5764
5765 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005766}
5767
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005768void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
5769 cvtVOP3P(Inst, Operands);
5770
5771 int Opc = Inst.getOpcode();
5772
5773 int SrcNum;
5774 const int Ops[] = { AMDGPU::OpName::src0,
5775 AMDGPU::OpName::src1,
5776 AMDGPU::OpName::src2 };
5777 for (SrcNum = 0;
5778 SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
5779 ++SrcNum);
5780 assert(SrcNum > 0);
5781
5782 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
5783 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
5784
5785 if ((OpSel & (1 << SrcNum)) != 0) {
5786 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
5787 uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
5788 Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
5789 }
5790}
5791
Sam Koltona3ec5c12016-10-07 14:46:06 +00005792static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
5793 // 1. This operand is input modifiers
5794 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
5795 // 2. This is not last operand
5796 && Desc.NumOperands > (OpNum + 1)
5797 // 3. Next operand is register class
5798 && Desc.OpInfo[OpNum + 1].RegClass != -1
5799 // 4. Next register is not tied to any other operand
5800 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
5801}
5802
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00005803void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
5804{
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005805 OptionalImmIndexMap OptionalIdx;
5806 unsigned Opc = Inst.getOpcode();
5807
5808 unsigned I = 1;
5809 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5810 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5811 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5812 }
5813
5814 for (unsigned E = Operands.size(); I != E; ++I) {
5815 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5816 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
5817 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
5818 } else if (Op.isInterpSlot() ||
5819 Op.isInterpAttr() ||
5820 Op.isAttrChan()) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00005821 Inst.addOperand(MCOperand::createImm(Op.getImm()));
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005822 } else if (Op.isImmModifier()) {
5823 OptionalIdx[Op.getImmTy()] = I;
5824 } else {
5825 llvm_unreachable("unhandled operand type");
5826 }
5827 }
5828
5829 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
5830 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
5831 }
5832
5833 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
5834 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
5835 }
5836
5837 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
5838 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
5839 }
5840}
5841
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005842void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
5843 OptionalImmIndexMap &OptionalIdx) {
5844 unsigned Opc = Inst.getOpcode();
5845
Tom Stellarda90b9522016-02-11 03:28:15 +00005846 unsigned I = 1;
5847 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00005848 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00005849 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00005850 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005851
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005852 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
5853 // This instruction has src modifiers
5854 for (unsigned E = Operands.size(); I != E; ++I) {
5855 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5856 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
5857 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
5858 } else if (Op.isImmModifier()) {
5859 OptionalIdx[Op.getImmTy()] = I;
5860 } else if (Op.isRegOrImm()) {
5861 Op.addRegOrImmOperands(Inst, 1);
5862 } else {
5863 llvm_unreachable("unhandled operand type");
5864 }
5865 }
5866 } else {
5867 // No src modifiers
5868 for (unsigned E = Operands.size(); I != E; ++I) {
5869 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5870 if (Op.isMod()) {
5871 OptionalIdx[Op.getImmTy()] = I;
5872 } else {
5873 Op.addRegOrImmOperands(Inst, 1);
5874 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005875 }
Tom Stellarda90b9522016-02-11 03:28:15 +00005876 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005877
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005878 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
5879 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
5880 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005881
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005882 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
5883 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
5884 }
Sam Koltona3ec5c12016-10-07 14:46:06 +00005885
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005886 // Special case v_mac_{f16, f32} and v_fmac_{f16, f32} (gfx906/gfx10+):
Sam Koltona3ec5c12016-10-07 14:46:06 +00005887 // it has src2 register operand that is tied to dst operand
5888 // we don't allow modifiers for this operand in assembler so src2_modifiers
Matt Arsenault0084adc2018-04-30 19:08:16 +00005889 // should be 0.
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00005890 if (Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
5891 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
Matt Arsenault0084adc2018-04-30 19:08:16 +00005892 Opc == AMDGPU::V_MAC_F32_e64_vi ||
5893 Opc == AMDGPU::V_MAC_F16_e64_vi ||
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00005894 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
5895 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
5896 Opc == AMDGPU::V_FMAC_F16_e64_gfx10) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005897 auto it = Inst.begin();
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005898 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
Sam Koltona3ec5c12016-10-07 14:46:06 +00005899 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
5900 ++it;
5901 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
5902 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005903}
5904
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005905void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005906 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005907 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005908}
5909
Dmitry Preobrazhensky682a6542017-11-17 15:15:40 +00005910void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
5911 const OperandVector &Operands) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005912 OptionalImmIndexMap OptIdx;
Dmitry Preobrazhensky682a6542017-11-17 15:15:40 +00005913 const int Opc = Inst.getOpcode();
5914 const MCInstrDesc &Desc = MII.get(Opc);
5915
5916 const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005917
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005918 cvtVOP3(Inst, Operands, OptIdx);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005919
Matt Arsenaulte135c4c2017-09-20 20:53:49 +00005920 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
5921 assert(!IsPacked);
5922 Inst.addOperand(Inst.getOperand(0));
5923 }
5924
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005925 // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
5926 // instruction, and then figure out where to actually put the modifiers
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005927
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005928 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005929
5930 int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
5931 if (OpSelHiIdx != -1) {
Matt Arsenaultc8f8cda2017-08-30 22:18:40 +00005932 int DefaultVal = IsPacked ? -1 : 0;
5933 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
5934 DefaultVal);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005935 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005936
5937 int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
5938 if (NegLoIdx != -1) {
Matt Arsenaultc8f8cda2017-08-30 22:18:40 +00005939 assert(IsPacked);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005940 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
5941 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
5942 }
5943
5944 const int Ops[] = { AMDGPU::OpName::src0,
5945 AMDGPU::OpName::src1,
5946 AMDGPU::OpName::src2 };
5947 const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
5948 AMDGPU::OpName::src1_modifiers,
5949 AMDGPU::OpName::src2_modifiers };
5950
5951 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005952
5953 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005954 unsigned OpSelHi = 0;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005955 unsigned NegLo = 0;
5956 unsigned NegHi = 0;
5957
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005958 if (OpSelHiIdx != -1) {
5959 OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
5960 }
5961
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005962 if (NegLoIdx != -1) {
5963 int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
5964 NegLo = Inst.getOperand(NegLoIdx).getImm();
5965 NegHi = Inst.getOperand(NegHiIdx).getImm();
5966 }
5967
5968 for (int J = 0; J < 3; ++J) {
5969 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
5970 if (OpIdx == -1)
5971 break;
5972
5973 uint32_t ModVal = 0;
5974
5975 if ((OpSel & (1 << J)) != 0)
5976 ModVal |= SISrcMods::OP_SEL_0;
5977
5978 if ((OpSelHi & (1 << J)) != 0)
5979 ModVal |= SISrcMods::OP_SEL_1;
5980
5981 if ((NegLo & (1 << J)) != 0)
5982 ModVal |= SISrcMods::NEG;
5983
5984 if ((NegHi & (1 << J)) != 0)
5985 ModVal |= SISrcMods::NEG_HI;
5986
5987 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
5988
Dmitry Preobrazhenskyb2d24e22017-07-07 14:29:06 +00005989 Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005990 }
5991}
5992
Sam Koltondfa29f72016-03-09 12:29:31 +00005993//===----------------------------------------------------------------------===//
5994// dpp
5995//===----------------------------------------------------------------------===//
5996
5997bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005998 using namespace AMDGPU::DPP;
5999
Sam Koltondfa29f72016-03-09 12:29:31 +00006000 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
6001 if (result) {
6002 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006003 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
6004 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
6005 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
6006 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
6007 (Imm == DppCtrl::WAVE_SHL1) ||
6008 (Imm == DppCtrl::WAVE_ROL1) ||
6009 (Imm == DppCtrl::WAVE_SHR1) ||
6010 (Imm == DppCtrl::WAVE_ROR1) ||
6011 (Imm == DppCtrl::ROW_MIRROR) ||
6012 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
6013 (Imm == DppCtrl::BCAST15) ||
6014 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00006015 }
6016 return false;
6017}
6018
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00006019bool AMDGPUOperand::isS16Imm() const {
6020 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
6021}
6022
6023bool AMDGPUOperand::isU16Imm() const {
6024 return isImm() && isUInt<16>(getImm());
6025}
6026
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00006027OperandMatchResultTy AMDGPUAsmParser::parseDim(OperandVector &Operands) {
6028 if (!isGFX10())
6029 return MatchOperand_NoMatch;
6030
6031 SMLoc S = Parser.getTok().getLoc();
6032
6033 if (getLexer().isNot(AsmToken::Identifier))
6034 return MatchOperand_NoMatch;
6035 if (getLexer().getTok().getString() != "dim")
6036 return MatchOperand_NoMatch;
6037
6038 Parser.Lex();
6039 if (getLexer().isNot(AsmToken::Colon))
6040 return MatchOperand_ParseFail;
6041
6042 Parser.Lex();
6043
6044 // We want to allow "dim:1D" etc., but the initial 1 is tokenized as an
6045 // integer.
6046 std::string Token;
6047 if (getLexer().is(AsmToken::Integer)) {
6048 SMLoc Loc = getLexer().getTok().getEndLoc();
6049 Token = getLexer().getTok().getString();
6050 Parser.Lex();
6051 if (getLexer().getTok().getLoc() != Loc)
6052 return MatchOperand_ParseFail;
6053 }
6054 if (getLexer().isNot(AsmToken::Identifier))
6055 return MatchOperand_ParseFail;
6056 Token += getLexer().getTok().getString();
6057
6058 StringRef DimId = Token;
6059 if (DimId.startswith("SQ_RSRC_IMG_"))
6060 DimId = DimId.substr(12);
6061
6062 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByAsmSuffix(DimId);
6063 if (!DimInfo)
6064 return MatchOperand_ParseFail;
6065
6066 Parser.Lex();
6067
6068 Operands.push_back(AMDGPUOperand::CreateImm(this, DimInfo->Encoding, S,
6069 AMDGPUOperand::ImmTyDim));
6070 return MatchOperand_Success;
6071}
6072
Alex Bradbury58eba092016-11-01 16:32:05 +00006073OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00006074AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006075 using namespace AMDGPU::DPP;
6076
Sam Koltondfa29f72016-03-09 12:29:31 +00006077 SMLoc S = Parser.getTok().getLoc();
6078 StringRef Prefix;
6079 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00006080
Sam Koltona74cd522016-03-18 15:35:51 +00006081 if (getLexer().getKind() == AsmToken::Identifier) {
6082 Prefix = Parser.getTok().getString();
6083 } else {
6084 return MatchOperand_NoMatch;
6085 }
6086
6087 if (Prefix == "row_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006088 Int = DppCtrl::ROW_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006089 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00006090 } else if (Prefix == "row_half_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006091 Int = DppCtrl::ROW_HALF_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006092 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00006093 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00006094 // Check to prevent parseDPPCtrlOps from eating invalid tokens
6095 if (Prefix != "quad_perm"
6096 && Prefix != "row_shl"
6097 && Prefix != "row_shr"
6098 && Prefix != "row_ror"
6099 && Prefix != "wave_shl"
6100 && Prefix != "wave_rol"
6101 && Prefix != "wave_shr"
6102 && Prefix != "wave_ror"
6103 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00006104 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00006105 }
6106
Sam Koltona74cd522016-03-18 15:35:51 +00006107 Parser.Lex();
6108 if (getLexer().isNot(AsmToken::Colon))
6109 return MatchOperand_ParseFail;
6110
6111 if (Prefix == "quad_perm") {
6112 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00006113 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00006114 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00006115 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006116 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00006117
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006118 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
Sam Koltondfa29f72016-03-09 12:29:31 +00006119 return MatchOperand_ParseFail;
6120
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006121 for (int i = 0; i < 3; ++i) {
6122 if (getLexer().isNot(AsmToken::Comma))
6123 return MatchOperand_ParseFail;
6124 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00006125
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006126 int64_t Temp;
6127 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
6128 return MatchOperand_ParseFail;
6129 const int shift = i*2 + 2;
6130 Int += (Temp << shift);
6131 }
Sam Koltona74cd522016-03-18 15:35:51 +00006132
Sam Koltona74cd522016-03-18 15:35:51 +00006133 if (getLexer().isNot(AsmToken::RBrac))
6134 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006135 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00006136 } else {
6137 // sel:%d
6138 Parser.Lex();
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006139 if (getParser().parseAbsoluteExpression(Int))
Sam Koltona74cd522016-03-18 15:35:51 +00006140 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00006141
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006142 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006143 Int |= DppCtrl::ROW_SHL0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006144 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006145 Int |= DppCtrl::ROW_SHR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006146 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006147 Int |= DppCtrl::ROW_ROR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006148 } else if (Prefix == "wave_shl" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006149 Int = DppCtrl::WAVE_SHL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006150 } else if (Prefix == "wave_rol" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006151 Int = DppCtrl::WAVE_ROL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006152 } else if (Prefix == "wave_shr" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006153 Int = DppCtrl::WAVE_SHR1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00006154 } else if (Prefix == "wave_ror" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006155 Int = DppCtrl::WAVE_ROR1;
Sam Koltona74cd522016-03-18 15:35:51 +00006156 } else if (Prefix == "row_bcast") {
6157 if (Int == 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006158 Int = DppCtrl::BCAST15;
Sam Koltona74cd522016-03-18 15:35:51 +00006159 } else if (Int == 31) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006160 Int = DppCtrl::BCAST31;
Sam Kolton7a2a3232016-07-14 14:50:35 +00006161 } else {
6162 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00006163 }
6164 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00006165 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00006166 }
Sam Koltondfa29f72016-03-09 12:29:31 +00006167 }
Sam Koltondfa29f72016-03-09 12:29:31 +00006168 }
Sam Koltona74cd522016-03-18 15:35:51 +00006169
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006170 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
Sam Koltondfa29f72016-03-09 12:29:31 +00006171 return MatchOperand_Success;
6172}
6173
Sam Kolton5f10a132016-05-06 11:31:17 +00006174AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006175 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00006176}
6177
David Stuttard20ea21c2019-03-12 09:52:58 +00006178AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
6179 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
6180}
6181
Sam Kolton5f10a132016-05-06 11:31:17 +00006182AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006183 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00006184}
6185
Sam Kolton5f10a132016-05-06 11:31:17 +00006186AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006187 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00006188}
6189
// Converter for DPP instructions: builds the MCInst operand list,
// materializing tied operands, skipping the textual "vcc" of VOP2b forms,
// and appending the optional row_mask/bank_mask/bound_ctrl immediates with
// their hardware defaults.
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic; defs come first after it.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    // If the next MCInst operand position is tied to an earlier one,
    // duplicate that earlier operand instead of consuming parsed input.
    auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
                                            MCOI::TIED_TO);
    if (TiedTo != -1) {
      assert((unsigned)TiedTo < Inst.getNumOperands());
      // handle tied old or src2 for MAC instructions
      Inst.addOperand(Inst.getOperand(TiedTo));
    }
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
      // Skip it.
      continue;
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Modifier-wrapped source expands to modifiers + register (2 operands).
      Op.addRegWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00006230
Sam Kolton3025e7f2016-04-26 13:33:56 +00006231//===----------------------------------------------------------------------===//
6232// sdwa
6233//===----------------------------------------------------------------------===//
6234
Alex Bradbury58eba092016-11-01 16:32:05 +00006235OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00006236AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
6237 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006238 using namespace llvm::AMDGPU::SDWA;
6239
Sam Kolton3025e7f2016-04-26 13:33:56 +00006240 SMLoc S = Parser.getTok().getLoc();
6241 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00006242 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00006243
Sam Kolton05ef1c92016-06-03 10:27:37 +00006244 res = parseStringWithPrefix(Prefix, Value);
6245 if (res != MatchOperand_Success) {
6246 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00006247 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00006248
Sam Kolton3025e7f2016-04-26 13:33:56 +00006249 int64_t Int;
6250 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00006251 .Case("BYTE_0", SdwaSel::BYTE_0)
6252 .Case("BYTE_1", SdwaSel::BYTE_1)
6253 .Case("BYTE_2", SdwaSel::BYTE_2)
6254 .Case("BYTE_3", SdwaSel::BYTE_3)
6255 .Case("WORD_0", SdwaSel::WORD_0)
6256 .Case("WORD_1", SdwaSel::WORD_1)
6257 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00006258 .Default(0xffffffff);
6259 Parser.Lex(); // eat last token
6260
6261 if (Int == 0xffffffff) {
6262 return MatchOperand_ParseFail;
6263 }
6264
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006265 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00006266 return MatchOperand_Success;
6267}
6268
Alex Bradbury58eba092016-11-01 16:32:05 +00006269OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00006270AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006271 using namespace llvm::AMDGPU::SDWA;
6272
Sam Kolton3025e7f2016-04-26 13:33:56 +00006273 SMLoc S = Parser.getTok().getLoc();
6274 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00006275 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00006276
6277 res = parseStringWithPrefix("dst_unused", Value);
6278 if (res != MatchOperand_Success) {
6279 return res;
6280 }
6281
6282 int64_t Int;
6283 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00006284 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
6285 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
6286 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00006287 .Default(0xffffffff);
6288 Parser.Lex(); // eat last token
6289
6290 if (Int == 0xffffffff) {
6291 return MatchOperand_ParseFail;
6292 }
6293
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006294 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00006295 return MatchOperand_Success;
6296}
6297
Sam Kolton945231a2016-06-10 09:57:59 +00006298void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00006299 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
Sam Kolton05ef1c92016-06-03 10:27:37 +00006300}
6301
Sam Kolton945231a2016-06-10 09:57:59 +00006302void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00006303 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
6304}
6305
Sam Koltonf7659d712017-05-23 10:08:55 +00006306void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
6307 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
6308}
6309
Sam Kolton5196b882016-07-01 09:59:21 +00006310void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
Sam Koltonf7659d712017-05-23 10:08:55 +00006311 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
Sam Kolton05ef1c92016-06-03 10:27:37 +00006312}
6313
6314void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Koltonf7659d712017-05-23 10:08:55 +00006315 uint64_t BasicInstType, bool skipVcc) {
Sam Kolton9dffada2017-01-17 15:26:02 +00006316 using namespace llvm::AMDGPU::SDWA;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00006317
Sam Kolton05ef1c92016-06-03 10:27:37 +00006318 OptionalImmIndexMap OptionalIdx;
Sam Koltonf7659d712017-05-23 10:08:55 +00006319 bool skippedVcc = false;
Sam Kolton05ef1c92016-06-03 10:27:37 +00006320
6321 unsigned I = 1;
6322 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
6323 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
6324 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
6325 }
6326
6327 for (unsigned E = Operands.size(); I != E; ++I) {
6328 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00006329 if (skipVcc && !skippedVcc && Op.isReg() && Op.getReg() == AMDGPU::VCC) {
Sam Koltonf7659d712017-05-23 10:08:55 +00006330 // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
6331 // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
6332 // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
6333 // Skip VCC only if we didn't skip it on previous iteration.
6334 if (BasicInstType == SIInstrFlags::VOP2 &&
6335 (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
6336 skippedVcc = true;
6337 continue;
6338 } else if (BasicInstType == SIInstrFlags::VOPC &&
6339 Inst.getNumOperands() == 0) {
6340 skippedVcc = true;
6341 continue;
6342 }
6343 }
6344 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00006345 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Sam Kolton05ef1c92016-06-03 10:27:37 +00006346 } else if (Op.isImm()) {
6347 // Handle optional arguments
6348 OptionalIdx[Op.getImmTy()] = I;
6349 } else {
6350 llvm_unreachable("Invalid operand type");
6351 }
Sam Koltonf7659d712017-05-23 10:08:55 +00006352 skippedVcc = false;
Sam Kolton05ef1c92016-06-03 10:27:37 +00006353 }
6354
Stanislav Mekhanoshin4f331cb2019-04-26 23:16:16 +00006355 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx10 &&
6356 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
Sam Koltonf7659d712017-05-23 10:08:55 +00006357 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
Sam Kolton549c89d2017-06-21 08:53:38 +00006358 // v_nop_sdwa_sdwa_vi/gfx9 has no optional sdwa arguments
Sam Koltona3ec5c12016-10-07 14:46:06 +00006359 switch (BasicInstType) {
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00006360 case SIInstrFlags::VOP1:
Sam Koltonf7659d712017-05-23 10:08:55 +00006361 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00006362 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00006363 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
6364 }
Sam Kolton9dffada2017-01-17 15:26:02 +00006365 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
6366 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
6367 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00006368 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00006369
6370 case SIInstrFlags::VOP2:
Sam Koltonf7659d712017-05-23 10:08:55 +00006371 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00006372 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00006373 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
6374 }
Sam Kolton9dffada2017-01-17 15:26:02 +00006375 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
6376 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
6377 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
6378 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00006379 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00006380
6381 case SIInstrFlags::VOPC:
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00006382 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::clamp) != -1)
6383 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton9dffada2017-01-17 15:26:02 +00006384 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
6385 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00006386 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00006387
Sam Koltona3ec5c12016-10-07 14:46:06 +00006388 default:
6389 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
6390 }
Sam Kolton05ef1c92016-06-03 10:27:37 +00006391 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00006392
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00006393 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00006394 // it has src2 register operand that is tied to dst operand
Sam Koltona568e3d2016-12-22 12:57:41 +00006395 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
6396 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006397 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00006398 std::advance(
Sam Koltonf7659d712017-05-23 10:08:55 +00006399 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00006400 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
Sam Kolton5196b882016-07-01 09:59:21 +00006401 }
Sam Kolton05ef1c92016-06-03 10:27:37 +00006402}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00006403
Tom Stellard45bb48e2015-06-13 03:28:10 +00006404/// Force static initialization.
6405extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00006406 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
6407 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00006408}
6409
6410#define GET_REGISTER_MATCHER
6411#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00006412#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00006413#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00006414
Sam Kolton11de3702016-05-24 12:38:33 +00006415// This fuction should be defined after auto-generated include so that we have
6416// MatchClassKind enum defined
6417unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
6418 unsigned Kind) {
6419 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00006420 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00006421 // operand. This method checks if we are given immediate operand but expect to
6422 // get corresponding token.
6423 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
6424 switch (Kind) {
6425 case MCK_addr64:
6426 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
6427 case MCK_gds:
6428 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00006429 case MCK_lds:
6430 return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00006431 case MCK_glc:
6432 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
6433 case MCK_idxen:
6434 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
6435 case MCK_offen:
6436 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006437 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00006438 // When operands have expression values, they will return true for isToken,
6439 // because it is not possible to distinguish between a token and an
6440 // expression at parse time. MatchInstructionImpl() will always try to
6441 // match an operand as a token, when isToken returns true, and when the
6442 // name of the expression is not a valid token, the match will fail,
6443 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006444 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
6445 case MCK_SSrcF32:
6446 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00006447 case MCK_SoppBrTarget:
6448 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00006449 case MCK_VReg32OrOff:
6450 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00006451 case MCK_InterpSlot:
6452 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
6453 case MCK_Attr:
6454 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
6455 case MCK_AttrChan:
6456 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00006457 default:
6458 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00006459 }
6460}
David Stuttard20ea21c2019-03-12 09:52:58 +00006461
6462//===----------------------------------------------------------------------===//
6463// endpgm
6464//===----------------------------------------------------------------------===//
6465
6466OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
6467 SMLoc S = Parser.getTok().getLoc();
6468 int64_t Imm = 0;
6469
6470 if (!parseExpr(Imm)) {
6471 // The operand is optional, if not present default to 0
6472 Imm = 0;
6473 }
6474
6475 if (!isUInt<16>(Imm)) {
6476 Error(S, "expected a 16-bit value");
6477 return MatchOperand_ParseFail;
6478 }
6479
6480 Operands.push_back(
6481 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
6482 return MatchOperand_Success;
6483}
6484
6485bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }