blob: 1f0f9f238fbc44bad7e46d03c64e4f350f9e5167 [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Tom Stellard45bb48e2015-06-13 03:28:10 +00006//
7//===----------------------------------------------------------------------===//
8
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00009#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000014#include "SIInstrInfo.h"
Richard Trieu8ce2ee92019-05-14 21:54:37 +000015#include "TargetInfo/AMDGPUTargetInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000016#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000018#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000020#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000021#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000022#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000023#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000025#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000026#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000028#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000029#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000033#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000037#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000039#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000040#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000043#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000044#include "llvm/Support/AMDGPUMetadata.h"
Scott Linder1e8c2c72018-06-21 19:38:56 +000045#include "llvm/Support/AMDHSAKernelDescriptor.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000047#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000048#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000049#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000050#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000051#include "llvm/Support/SMLoc.h"
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +000052#include "llvm/Support/TargetParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000053#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000054#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000055#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <cstring>
59#include <iterator>
60#include <map>
61#include <memory>
62#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000063
Tom Stellard45bb48e2015-06-13 03:28:10 +000064using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000065using namespace llvm::AMDGPU;
Scott Linder1e8c2c72018-06-21 19:38:56 +000066using namespace llvm::amdhsa;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
68namespace {
69
Sam Kolton1eeb11b2016-09-09 14:44:04 +000070class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000071
// Coarse classification of a parsed register operand. IS_SPECIAL appears to
// cover architectural registers outside the VGPR/SGPR/AGPR/TTMP files
// (e.g. VCC, EXEC) — confirm against the register parser.
enum RegisterKind {
  IS_UNKNOWN,
  IS_VGPR,
  IS_SGPR,
  IS_AGPR,
  IS_TTMP,
  IS_SPECIAL
};
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000073
Sam Kolton1eeb11b2016-09-09 14:44:04 +000074//===----------------------------------------------------------------------===//
75// Operand
76//===----------------------------------------------------------------------===//
77
/// A single parsed operand of an AMDGPU instruction: a token, an immediate,
/// a register, or a general MC expression. The active alternative is
/// recorded in \p Kind and stored in the union declared further below.
class AMDGPUOperand : public MCParsedAsmOperand {
  // Discriminator for the operand storage union.
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  // Source range of the operand text, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Non-owning back-pointer to the parser that created this operand;
  // presumably consulted by the out-of-line predicates declared below
  // (e.g. isInlinableImm) for subtarget information.
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  // Operands are heap-allocated and uniquely owned.
  using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000094
Sam Kolton945231a2016-06-10 09:57:59 +000095 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000096 bool Abs = false;
97 bool Neg = false;
98 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000099
100 bool hasFPModifiers() const { return Abs || Neg; }
101 bool hasIntModifiers() const { return Sext; }
102 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
103
104 int64_t getFPModifiersOperand() const {
105 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000106 Operand |= Abs ? SISrcMods::ABS : 0u;
107 Operand |= Neg ? SISrcMods::NEG : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000108 return Operand;
109 }
110
111 int64_t getIntModifiersOperand() const {
112 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000113 Operand |= Sext ? SISrcMods::SEXT : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000114 return Operand;
115 }
116
117 int64_t getModifiersOperand() const {
118 assert(!(hasFPModifiers() && hasIntModifiers())
119 && "fp and int modifiers should not be used simultaneously");
120 if (hasFPModifiers()) {
121 return getFPModifiersOperand();
122 } else if (hasIntModifiers()) {
123 return getIntModifiersOperand();
124 } else {
125 return 0;
126 }
127 }
128
129 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
130 };
131
  /// Kinds of "immediate" operands, including named instruction fields
  /// (gds, offset, clamp, dpp controls, ...) that are parsed as immediates.
  /// NOTE: enumerator order/values are relied upon only as distinct tags;
  /// do not reorder without auditing users.
  enum ImmTy {
    ImmTyNone,
    // Memory / addressing fields.
    ImmTyGDS,
    ImmTyLDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    // Cache-policy bits.
    ImmTyDLC,
    ImmTyGLC,
    ImmTySLC,
    ImmTySWZ,
    ImmTyTFE,
    ImmTyD16,
    // VOP modifiers.
    ImmTyClampSI,
    ImmTyOModSI,
    // DPP fields.
    ImmTyDPP8,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTyDppFi,
    // SDWA fields.
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    // MIMG fields.
    ImmTyDMask,
    ImmTyDim,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128A16,
    ImmTyLWE,
    // Export fields.
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyFORMAT,
    // Misc named operands.
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyGprIdxMode,
    ImmTyHigh,
    // MFMA fields.
    ImmTyBLGP,
    ImmTyCBSZ,
    ImmTyABID,
    ImmTyEndpgm,
  };
189
private:
  // Payload for Kind == Token: a non-owning view of the token text.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  // Payload for Kind == Immediate.
  struct ImmOp {
    int64_t Val;
    ImmTy Type;     // which named immediate field this is
    bool IsFPImm;   // value was written as an FP literal
    Modifiers Mods;
  };

  // Payload for Kind == Register.
  struct RegOp {
    unsigned RegNo;
    Modifiers Mods;
  };

  // Active member selected by Kind; accessors assert on the discriminant
  // before touching the union.
  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

public:
Tom Stellard45bb48e2015-06-13 03:28:10 +0000216 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000217 if (Kind == Token)
218 return true;
219
Tom Stellard89049702016-06-15 02:54:14 +0000220 // When parsing operands, we can't always tell if something was meant to be
221 // a token, like 'gds', or an expression that references a global variable.
222 // In this case, we assume the string is an expression, and if we need to
223 // interpret is a token, then we treat the symbol name as the token.
Dmitry Preobrazhensky4ccb7f82019-07-19 13:12:47 +0000224 return isSymbolRefExpr();
225 }
226
227 bool isSymbolRefExpr() const {
228 return isExpr() && Expr && isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000229 }
230
231 bool isImm() const override {
232 return Kind == Immediate;
233 }
234
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000235 bool isInlinableImm(MVT type) const;
236 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000237
Tom Stellard45bb48e2015-06-13 03:28:10 +0000238 bool isRegKind() const {
239 return Kind == Register;
240 }
241
242 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000243 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000244 }
245
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000246 bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000247 return isRegClass(RCID) || isInlinableImm(type) || isLiteralImm(type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000248 }
249
  // Per-type shorthands over isRegOrImmWithInputMods: 32-bit operands use
  // the VS_32 class, 64-bit operands VS_64 (VGPR-or-SGPR source classes).
  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }
273
  // True if the operand's register class is any VGPR class (32-bit through
  // 1024-bit tuples).
  bool isVReg() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
           isRegClass(AMDGPU::VReg_64RegClassID) ||
           isRegClass(AMDGPU::VReg_96RegClassID) ||
           isRegClass(AMDGPU::VReg_128RegClassID) ||
           isRegClass(AMDGPU::VReg_160RegClassID) ||
           isRegClass(AMDGPU::VReg_256RegClassID) ||
           isRegClass(AMDGPU::VReg_512RegClassID) ||
           isRegClass(AMDGPU::VReg_1024RegClassID);
  }

  bool isVReg32() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  // A 32-bit VGPR or the literal 'off' token (used e.g. by export operands).
  bool isVReg32OrOff() const {
    return isOff() || isVReg32();
  }

  // The SGPR_NULL architectural register.
  bool isNull() const {
    return isRegKind() && getReg() == AMDGPU::SGPR_NULL;
  }
296
  // SDWA source/destination operand checks; defined out of line.
  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  // True if this is an immediate carrying the given named-field tag.
  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  // True if this immediate is a named instruction field rather than a plain
  // numeric immediate.
  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000310
  // One predicate per named immediate field; each simply checks the ImmTy
  // tag. Offset-style fields additionally range-check the stored value.
  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isDim() const { return isImmTy(ImmTyDim); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  // Offsets carry encoding-width range checks (16/8/8 bits).
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  // FLAT offsets may be tagged either as a plain offset or an instruction
  // offset.
  bool isFlatOffset() const { return isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isDLC() const { return isImmTy(ImmTyDLC); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isSWZ() const { return isImmTy(ImmTySWZ); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isFI() const { return isImmTy(ImmTyDppFi); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  // clamp/omod are grouped as generic VOP3 "mod" operands.
  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }
364
  bool isRegClass(unsigned RCID) const;

  bool isInlineValue() const;

  // Register of class RCID, or an inline constant of the given type, with
  // no modifiers. Base check for the scalar/vector source predicates below.
  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }

  // SCSrc*: SGPR or inline constant (no literal).
  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isBoolReg() const;

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  // SSrc*: like SCSrc* but additionally allows a literal (and, for 32-bit,
  // an expression resolved later).
  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  // Deliberately unreachable: no instruction takes this operand form; the
  // definition exists only to satisfy generated matcher references.
  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  // Deliberately unreachable, as isSSrcV2B16 above.
  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }

  // SGPR-or-LDS-direct scalar source (SRegOrLds_32 class).
  bool isSSrcOrLdsB32() const {
    return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
           isLiteralImm(MVT::i32) || isExpr();
  }
447
  // VCSrc*: VGPR-or-SGPR (VS_*) register or inline constant, no literal.
  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  // VSrc*: VCSrc* plus a literal (and, for 32-bit, an expression). Note the
  // integer variants delegate to the FP VCSrc checks; the register-class and
  // inline-constant tests coincide for same-width int/FP types.
  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  bool isVSrcV2B16() const {
    return isVSrcB16() || isLiteralImm(MVT::v2i16);
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  bool isVSrcV2F16() const {
    return isVSrcF16() || isLiteralImm(MVT::v2f16);
  }
511
  // VISrc*: VGPR-only register or inline constant.
  bool isVISrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
  }

  bool isVISrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i16);
  }

  bool isVISrcV2B16() const {
    return isVISrcB16();
  }

  bool isVISrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f32);
  }

  bool isVISrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f16);
  }

  bool isVISrcV2F16() const {
    return isVISrcF16() || isVISrcB32();
  }

  // AISrc*: AGPR (accumulation) register or inline constant, in single
  // registers and 128/512/1024-bit tuples (MFMA operands).
  bool isAISrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i32);
  }

  bool isAISrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i16);
  }

  bool isAISrcV2B16() const {
    return isAISrcB16();
  }

  bool isAISrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f32);
  }

  bool isAISrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f16);
  }

  bool isAISrcV2F16() const {
    return isAISrcF16() || isAISrcB32();
  }

  bool isAISrc_128B32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i32);
  }

  bool isAISrc_128B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i16);
  }

  bool isAISrc_128V2B16() const {
    return isAISrc_128B16();
  }

  bool isAISrc_128F32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f32);
  }

  bool isAISrc_128F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f16);
  }

  bool isAISrc_128V2F16() const {
    return isAISrc_128F16() || isAISrc_128B32();
  }

  bool isAISrc_512B32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i32);
  }

  bool isAISrc_512B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i16);
  }

  bool isAISrc_512V2B16() const {
    return isAISrc_512B16();
  }

  bool isAISrc_512F32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f32);
  }

  bool isAISrc_512F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f16);
  }

  bool isAISrc_512V2F16() const {
    return isAISrc_512F16() || isAISrc_512B32();
  }

  bool isAISrc_1024B32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i32);
  }

  bool isAISrc_1024B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i16);
  }

  bool isAISrc_1024V2B16() const {
    return isAISrc_1024B16();
  }

  bool isAISrc_1024F32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f32);
  }

  bool isAISrc_1024F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f16);
  }

  bool isAISrc_1024V2F16() const {
    return isAISrc_1024F16() || isAISrc_1024B32();
  }
631
  // KImm*: inline 32-/16-bit FP literal operands (VOP2 madmk/madak style).
  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }

  // AMDGPU operands are never modeled as memory operands.
  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  // SOPP branch targets may be either a resolved immediate or a symbolic
  // expression.
  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }
651
  // Operand checks that need more context (subtarget, encoding tables);
  // defined out of line.
  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPP8() const;
  bool isDPPCtrl() const;
  bool isBLGP() const;
  bool isCBSZ() const;
  bool isABID() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
  bool isEndpgm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000668
  // For an expression operand, its token text is the referenced symbol's
  // name (see isToken()).
  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }

  StringRef getToken() const {
    assert(isToken());

    // isToken() also accepts symbol-reference expressions; report the
    // symbol name in that case.
    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    assert(isRegKind());
    return Reg.RegNo;
  }
698
Tom Stellard45bb48e2015-06-13 03:28:10 +0000699 SMLoc getStartLoc() const override {
700 return StartLoc;
701 }
702
Peter Collingbourne0da86302016-10-10 22:49:37 +0000703 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000704 return EndLoc;
705 }
706
Matt Arsenaultf7f59b52017-12-20 18:52:57 +0000707 SMRange getLocRange() const {
708 return SMRange(StartLoc, EndLoc);
709 }
710
Sam Kolton945231a2016-06-10 09:57:59 +0000711 Modifiers getModifiers() const {
712 assert(isRegKind() || isImmTy(ImmTyNone));
713 return isRegKind() ? Reg.Mods : Imm.Mods;
714 }
715
716 void setModifiers(Modifiers Mods) {
717 assert(isRegKind() || isImmTy(ImmTyNone));
718 if (isRegKind())
719 Reg.Mods = Mods;
720 else
721 Imm.Mods = Mods;
722 }
723
724 bool hasModifiers() const {
725 return getModifiers().hasModifiers();
726 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000727
Sam Kolton945231a2016-06-10 09:57:59 +0000728 bool hasFPModifiers() const {
729 return getModifiers().hasFPModifiers();
730 }
731
732 bool hasIntModifiers() const {
733 return getModifiers().hasIntModifiers();
734 }
735
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000736 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
737
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000738 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000739
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000740 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000741
Matt Arsenault4bd72362016-12-10 00:39:12 +0000742 template <unsigned Bitwidth>
743 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
744
745 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
746 addKImmFPOperands<16>(Inst, N);
747 }
748
749 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
750 addKImmFPOperands<32>(Inst, N);
751 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000752
753 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000754
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +0000755 void addBoolRegOperands(MCInst &Inst, unsigned N) const {
756 addRegOperands(Inst, N);
757 }
758
Sam Kolton945231a2016-06-10 09:57:59 +0000759 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
760 if (isRegKind())
761 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000762 else if (isExpr())
763 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000764 else
765 addImmOperands(Inst, N);
766 }
767
768 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
769 Modifiers Mods = getModifiers();
770 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
771 if (isRegKind()) {
772 addRegOperands(Inst, N);
773 } else {
774 addImmOperands(Inst, N, false);
775 }
776 }
777
  // FP-modifier flavor of addRegOrImmWithInputModsOperands; integer
  // modifiers must not be present on this operand.
  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }
782
  // Integer-modifier flavor of addRegOrImmWithInputModsOperands;
  // FP modifiers must not be present on this operand.
  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }
787
Sam Kolton9772eb32017-01-11 11:46:30 +0000788 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
789 Modifiers Mods = getModifiers();
790 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
791 assert(isRegKind());
792 addRegOperands(Inst, N);
793 }
794
  // FP-modifier flavor of addRegWithInputModsOperands; integer modifiers
  // must not be present.
  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
799
  // Integer-modifier flavor of addRegWithInputModsOperands; FP modifiers
  // must not be present.
  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
804
Sam Kolton945231a2016-06-10 09:57:59 +0000805 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
806 if (isImm())
807 addImmOperands(Inst, N);
808 else {
809 assert(isExpr());
810 Inst.addOperand(MCOperand::createExpr(Expr));
811 }
812 }
813
  // Pretty-print an immediate-type tag for debug dumps (see print()).
  // Keep the case list in sync with the ImmTy enumeration.
  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyDLC: OS << "DLC"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTySWZ: OS << "SWZ"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyFORMAT: OS << "FORMAT"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDPP8: OS << "DPP8"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTyDppFi: OS << "FI"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyDim: OS << "Dim"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128A16: OS << "R128A16"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyAttrChan: OS << "AttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
    case ImmTyHigh: OS << "High"; break;
    case ImmTyBLGP: OS << "BLGP"; break;
    case ImmTyCBSZ: OS << "CBSZ"; break;
    case ImmTyABID: OS << "ABID"; break;
    case ImmTyEndpgm: OS << "Endpgm"; break;
    }
  }
873
  // Debug dump of the operand; format depends on the operand kind.
  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000894
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000895 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
896 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000897 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000898 bool IsFPImm = false) {
Jonas Devlieghere0eaee542019-08-15 15:54:37 +0000899 auto Op = std::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000900 Op->Imm.Val = Val;
901 Op->Imm.IsFPImm = IsFPImm;
902 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000903 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000904 Op->StartLoc = Loc;
905 Op->EndLoc = Loc;
906 return Op;
907 }
908
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000909 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
910 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000911 bool HasExplicitEncodingSize = true) {
Jonas Devlieghere0eaee542019-08-15 15:54:37 +0000912 auto Res = std::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000913 Res->Tok.Data = Str.data();
914 Res->Tok.Length = Str.size();
915 Res->StartLoc = Loc;
916 Res->EndLoc = Loc;
917 return Res;
918 }
919
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000920 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
921 unsigned RegNo, SMLoc S,
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +0000922 SMLoc E) {
Jonas Devlieghere0eaee542019-08-15 15:54:37 +0000923 auto Op = std::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000924 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000925 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000926 Op->StartLoc = S;
927 Op->EndLoc = E;
928 return Op;
929 }
930
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000931 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
932 const class MCExpr *Expr, SMLoc S) {
Jonas Devlieghere0eaee542019-08-15 15:54:37 +0000933 auto Op = std::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000934 Op->Expr = Expr;
935 Op->StartLoc = S;
936 Op->EndLoc = S;
937 return Op;
938 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000939};
940
Sam Kolton945231a2016-06-10 09:57:59 +0000941raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
942 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
943 return OS;
944}
945
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000946//===----------------------------------------------------------------------===//
947// AsmParser
948//===----------------------------------------------------------------------===//
949
Artem Tamazova01cce82016-12-27 16:00:11 +0000950// Holds info related to the current kernel, e.g. count of SGPRs used.
951// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
952// .amdgpu_hsa_kernel or at EOF.
953class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000954 int SgprIndexUnusedMin = -1;
955 int VgprIndexUnusedMin = -1;
956 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000957
958 void usesSgprAt(int i) {
959 if (i >= SgprIndexUnusedMin) {
960 SgprIndexUnusedMin = ++i;
961 if (Ctx) {
962 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
963 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
964 }
965 }
966 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000967
Artem Tamazova01cce82016-12-27 16:00:11 +0000968 void usesVgprAt(int i) {
969 if (i >= VgprIndexUnusedMin) {
970 VgprIndexUnusedMin = ++i;
971 if (Ctx) {
972 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
973 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
974 }
975 }
976 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000977
Artem Tamazova01cce82016-12-27 16:00:11 +0000978public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000979 KernelScopeInfo() = default;
980
Artem Tamazova01cce82016-12-27 16:00:11 +0000981 void initialize(MCContext &Context) {
982 Ctx = &Context;
983 usesSgprAt(SgprIndexUnusedMin = -1);
984 usesVgprAt(VgprIndexUnusedMin = -1);
985 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000986
Artem Tamazova01cce82016-12-27 16:00:11 +0000987 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
988 switch (RegKind) {
989 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +0000990 case IS_AGPR: // fall through
Artem Tamazova01cce82016-12-27 16:00:11 +0000991 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
992 default: break;
993 }
994 }
995};
996
Tom Stellard45bb48e2015-06-13 03:28:10 +0000997class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000998 MCAsmParser &Parser;
999
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00001000 // Number of extra operands parsed after the first optional operand.
1001 // This may be necessary to skip hardcoded mandatory operands.
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00001002 static const unsigned MAX_OPR_LOOKAHEAD = 8;
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00001003
Eugene Zelenko66203762017-01-21 00:53:49 +00001004 unsigned ForcedEncodingSize = 0;
1005 bool ForcedDPP = false;
1006 bool ForcedSDWA = false;
Artem Tamazova01cce82016-12-27 16:00:11 +00001007 KernelScopeInfo KernelScope;
Matt Arsenault68802d32015-11-05 03:11:27 +00001008
Tom Stellard45bb48e2015-06-13 03:28:10 +00001009 /// @name Auto-generated Match Functions
1010 /// {
1011
1012#define GET_ASSEMBLER_HEADER
1013#include "AMDGPUGenAsmMatcher.inc"
1014
1015 /// }
1016
Tom Stellard347ac792015-06-26 21:15:07 +00001017private:
Artem Tamazov25478d82016-12-29 15:41:52 +00001018 bool ParseAsAbsoluteExpression(uint32_t &Ret);
Scott Linder1e8c2c72018-06-21 19:38:56 +00001019 bool OutOfRangeError(SMRange Range);
1020 /// Calculate VGPR/SGPR blocks required for given target, reserved
1021 /// registers, and user-specified NextFreeXGPR values.
1022 ///
1023 /// \param Features [in] Target features, used for bug corrections.
1024 /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
1025 /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
1026 /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001027 /// \param EnableWavefrontSize32 [in] Value of ENABLE_WAVEFRONT_SIZE32 kernel
1028 /// descriptor field, if valid.
Scott Linder1e8c2c72018-06-21 19:38:56 +00001029 /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
1030 /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
1031 /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
1032 /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
1033 /// \param VGPRBlocks [out] Result VGPR block count.
1034 /// \param SGPRBlocks [out] Result SGPR block count.
1035 bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
1036 bool FlatScrUsed, bool XNACKUsed,
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001037 Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
1038 SMRange VGPRRange, unsigned NextFreeSGPR,
1039 SMRange SGPRRange, unsigned &VGPRBlocks,
1040 unsigned &SGPRBlocks);
Scott Linder1e8c2c72018-06-21 19:38:56 +00001041 bool ParseDirectiveAMDGCNTarget();
1042 bool ParseDirectiveAMDHSAKernel();
Tom Stellard347ac792015-06-26 21:15:07 +00001043 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
1044 bool ParseDirectiveHSACodeObjectVersion();
1045 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00001046 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
1047 bool ParseDirectiveAMDKernelCodeT();
Matt Arsenault68802d32015-11-05 03:11:27 +00001048 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001049 bool ParseDirectiveAMDGPUHsaKernel();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00001050
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00001051 bool ParseDirectiveISAVersion();
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00001052 bool ParseDirectiveHSAMetadata();
Tim Renoufe7bd52f2019-03-20 18:47:21 +00001053 bool ParseDirectivePALMetadataBegin();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00001054 bool ParseDirectivePALMetadata();
Nicolai Haehnle08e8cb52019-06-25 11:51:35 +00001055 bool ParseDirectiveAMDGPULDS();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00001056
Tim Renoufe7bd52f2019-03-20 18:47:21 +00001057 /// Common code to parse out a block of text (typically YAML) between start and
1058 /// end directives.
1059 bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
1060 const char *AssemblerDirectiveEnd,
1061 std::string &CollectString);
1062
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001063 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00001064 RegisterKind RegKind, unsigned Reg1);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001065 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00001066 unsigned& RegNum, unsigned& RegWidth);
1067 unsigned ParseRegularReg(RegisterKind &RegKind,
1068 unsigned &RegNum,
1069 unsigned &RegWidth);
1070 unsigned ParseSpecialReg(RegisterKind &RegKind,
1071 unsigned &RegNum,
1072 unsigned &RegWidth);
1073 unsigned ParseRegList(RegisterKind &RegKind,
1074 unsigned &RegNum,
1075 unsigned &RegWidth);
1076 bool ParseRegRange(unsigned& Num, unsigned& Width);
1077 unsigned getRegularReg(RegisterKind RegKind,
1078 unsigned RegNum,
1079 unsigned RegWidth);
1080
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001081 bool isRegister();
1082 bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
Scott Linder1e8c2c72018-06-21 19:38:56 +00001083 Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
1084 void initializeGprCountSymbol(RegisterKind RegKind);
1085 bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
1086 unsigned RegWidth);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001087 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00001088 bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001089 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
1090 bool IsGdsHardcoded);
Tom Stellard347ac792015-06-26 21:15:07 +00001091
Tom Stellard45bb48e2015-06-13 03:28:10 +00001092public:
  // Target-specific match result codes, numbered after the
  // tablegen-generated ones.
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };
  // Parsing mode passed to parseOperand(); NSA selects non-default
  // handling (presumably for NSA-style address operand lists — see
  // the parseOperand implementation).
  enum OperandMode {
    OperandMode_Default,
    OperandMode_NSA,
  };
Tom Stellard88e0b252015-10-06 15:57:53 +00001100
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001101 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001102
  // Construct the parser: pick a default subtarget when none was given,
  // then seed pre-defined assembler symbols describing the ISA version
  // and (for code-object-v3 targets) the GPR usage counters.
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("southern-islands");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
      MCContext &Ctx = getContext();
      // Code-object-v3 uses the .amdgcn.* symbol names; older ABIs use
      // the legacy .option.machine_version_* names.
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      } else {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      // v3 tracks GPR counts via dedicated symbols; older ABIs use the
      // KernelScopeInfo high-water-mark machinery instead.
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }
1147
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001148 bool hasXNACK() const {
1149 return AMDGPU::hasXNACK(getSTI());
1150 }
1151
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00001152 bool hasMIMG_R128() const {
1153 return AMDGPU::hasMIMG_R128(getSTI());
1154 }
1155
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00001156 bool hasPackedD16() const {
1157 return AMDGPU::hasPackedD16(getSTI());
1158 }
1159
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001160 bool isSI() const {
1161 return AMDGPU::isSI(getSTI());
1162 }
1163
1164 bool isCI() const {
1165 return AMDGPU::isCI(getSTI());
1166 }
1167
1168 bool isVI() const {
1169 return AMDGPU::isVI(getSTI());
1170 }
1171
Sam Koltonf7659d712017-05-23 10:08:55 +00001172 bool isGFX9() const {
1173 return AMDGPU::isGFX9(getSTI());
1174 }
1175
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +00001176 bool isGFX10() const {
1177 return AMDGPU::isGFX10(getSTI());
1178 }
1179
Matt Arsenault26faed32016-12-05 22:26:17 +00001180 bool hasInv2PiInlineImm() const {
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00001181 return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
Matt Arsenault26faed32016-12-05 22:26:17 +00001182 }
1183
Matt Arsenaultfd023142017-06-12 15:55:58 +00001184 bool hasFlatOffsets() const {
1185 return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
1186 }
1187
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001188 bool hasSGPR102_SGPR103() const {
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00001189 return !isVI() && !isGFX9();
1190 }
1191
1192 bool hasSGPR104_SGPR105() const {
1193 return isGFX10();
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001194 }
1195
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00001196 bool hasIntClamp() const {
1197 return getFeatureBits()[AMDGPU::FeatureIntClamp];
1198 }
1199
Tom Stellard347ac792015-06-26 21:15:07 +00001200 AMDGPUTargetStreamer &getTargetStreamer() {
1201 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
1202 return static_cast<AMDGPUTargetStreamer &>(TS);
1203 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001204
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001205 const MCRegisterInfo *getMRI() const {
1206 // We need this const_cast because for some reason getContext() is not const
1207 // in MCAsmParser.
1208 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
1209 }
1210
1211 const MCInstrInfo *getMII() const {
1212 return &MII;
1213 }
1214
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00001215 const FeatureBitset &getFeatureBits() const {
1216 return getSTI().getFeatureBits();
1217 }
1218
Sam Kolton05ef1c92016-06-03 10:27:37 +00001219 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
1220 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
1221 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +00001222
Sam Kolton05ef1c92016-06-03 10:27:37 +00001223 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
1224 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
1225 bool isForcedDPP() const { return ForcedDPP; }
1226 bool isForcedSDWA() const { return ForcedSDWA; }
Matt Arsenault5f45e782017-01-09 18:44:11 +00001227 ArrayRef<unsigned> getMatchedVariants() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001228
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001229 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +00001230 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
1231 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +00001232 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
1233 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001234 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1235 OperandVector &Operands, MCStreamer &Out,
1236 uint64_t &ErrorInfo,
1237 bool MatchingInlineAsm) override;
1238 bool ParseDirective(AsmToken DirectiveID) override;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001239 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic,
1240 OperandMode Mode = OperandMode_Default);
Sam Kolton05ef1c92016-06-03 10:27:37 +00001241 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001242 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
1243 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001244 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001245
Sam Kolton11de3702016-05-24 12:38:33 +00001246 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001247
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001248 OperandMatchResultTy
1249 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001250 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001251 bool (*ConvertResult)(int64_t &) = nullptr);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001252
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00001253 OperandMatchResultTy
1254 parseOperandArrayWithPrefix(const char *Prefix,
1255 OperandVector &Operands,
1256 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
1257 bool (*ConvertResult)(int64_t&) = nullptr);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001258
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001259 OperandMatchResultTy
1260 parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00001261 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001262 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
1263 StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001264
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00001265 bool isModifier();
1266 bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1267 bool isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1268 bool isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
1269 bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const;
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00001270 bool parseSP3NegModifier();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00001271 OperandMatchResultTy parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001272 OperandMatchResultTy parseReg(OperandVector &Operands);
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00001273 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false);
Sam Kolton9772eb32017-01-11 11:46:30 +00001274 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
1275 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
1276 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
1277 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001278 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
Tim Renouf35484c92018-08-21 11:06:05 +00001279 OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +00001280
Tom Stellard45bb48e2015-06-13 03:28:10 +00001281 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
Artem Tamazov43b61562017-02-03 12:47:30 +00001282 void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
1283 void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001284 void cvtExp(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001285
1286 bool parseCnt(int64_t &IntVal);
1287 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001288 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001289
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001290private:
  // Holds one parsed field of a symbolic operand (used by the hwreg/sendmsg
  // parse/validate helpers declared below).
  struct OperandInfoTy {
    int64_t Id;
    // NOTE(review): presumably set when the field was written by name rather
    // than as a raw number — confirm at the parse* call sites.
    bool IsSymbolic = false;
    // Set once the field has actually been parsed — TODO confirm at call sites.
    bool IsDefined = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };
Sam Kolton11de3702016-05-24 12:38:33 +00001298
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00001299 bool parseSendMsgBody(OperandInfoTy &Msg, OperandInfoTy &Op, OperandInfoTy &Stream);
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00001300 bool validateSendMsg(const OperandInfoTy &Msg,
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00001301 const OperandInfoTy &Op,
1302 const OperandInfoTy &Stream,
1303 const SMLoc Loc);
1304
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00001305 bool parseHwregBody(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00001306 bool validateHwreg(const OperandInfoTy &HwReg,
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00001307 const int64_t Offset,
1308 const int64_t Width,
1309 const SMLoc Loc);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001310
1311 void errorExpTgt();
1312 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00001313 SMLoc getFlatOffsetLoc(const OperandVector &Operands) const;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001314
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00001315 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc, const OperandVector &Operands);
1316 bool validateFlatOffset(const MCInst &Inst, const OperandVector &Operands);
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00001317 bool validateSOPLiteral(const MCInst &Inst) const;
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00001318 bool validateConstantBusLimitations(const MCInst &Inst);
1319 bool validateEarlyClobberLimitations(const MCInst &Inst);
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00001320 bool validateIntClampSupported(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001321 bool validateMIMGAtomicDMask(const MCInst &Inst);
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00001322 bool validateMIMGGatherDMask(const MCInst &Inst);
Dmitry Preobrazhenskyedd9f702019-11-18 17:23:40 +03001323 bool validateMovrels(const MCInst &Inst);
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00001324 bool validateMIMGDataSize(const MCInst &Inst);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001325 bool validateMIMGAddrSize(const MCInst &Inst);
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00001326 bool validateMIMGD16(const MCInst &Inst);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001327 bool validateMIMGDim(const MCInst &Inst);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00001328 bool validateLdsDirect(const MCInst &Inst);
Stanislav Mekhanoshin5f581c92019-06-12 17:52:51 +00001329 bool validateOpSel(const MCInst &Inst);
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001330 bool validateVccOperand(unsigned Reg) const;
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00001331 bool validateVOP3Literal(const MCInst &Inst) const;
Dmitry Preobrazhenskyfe2ee4c2019-09-02 12:50:05 +00001332 unsigned getConstantBusLimit(unsigned Opcode) const;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001333 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1334 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1335 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001336
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001337 bool isId(const StringRef Id) const;
1338 bool isId(const AsmToken &Token, const StringRef Id) const;
1339 bool isToken(const AsmToken::TokenKind Kind) const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001340 bool trySkipId(const StringRef Id);
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00001341 bool trySkipId(const StringRef Id, const AsmToken::TokenKind Kind);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001342 bool trySkipToken(const AsmToken::TokenKind Kind);
1343 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1344 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00001345 void peekTokens(MutableArrayRef<AsmToken> Tokens);
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001346 AsmToken::TokenKind getTokenKind() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001347 bool parseExpr(int64_t &Imm);
Dmitry Preobrazhensky4ccb7f82019-07-19 13:12:47 +00001348 bool parseExpr(OperandVector &Operands);
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00001349 StringRef getTokenStr() const;
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001350 AsmToken peekToken();
1351 AsmToken getToken() const;
1352 SMLoc getLoc() const;
1353 void lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001354
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001355public:
Sam Kolton11de3702016-05-24 12:38:33 +00001356 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00001357 OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001358
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001359 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001360 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Matt Arsenault0e8a2992016-12-15 20:40:20 +00001361 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1362 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001363 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001364 OperandMatchResultTy parseBoolReg(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001365
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00001366 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1367 const unsigned MinVal,
1368 const unsigned MaxVal,
1369 const StringRef ErrMsg);
1370 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1371 bool parseSwizzleOffset(int64_t &Imm);
1372 bool parseSwizzleMacro(int64_t &Imm);
1373 bool parseSwizzleQuadPerm(int64_t &Imm);
1374 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1375 bool parseSwizzleBroadcast(int64_t &Imm);
1376 bool parseSwizzleSwap(int64_t &Imm);
1377 bool parseSwizzleReverse(int64_t &Imm);
1378
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00001379 OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
1380 int64_t parseGPRIdxMacro();
1381
  // Plain MUBUF conversion: cvtMubufImpl with both boolean flags false.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  // MUBUF conversion with cvtMubufImpl flags (true, false) — presumably
  // (IsAtomic, IsAtomicReturn); confirm against cvtMubufImpl.
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  // MUBUF conversion with both cvtMubufImpl flags true (atomic-with-return form).
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  // MUBUF conversion passing an extra trailing true to cvtMubufImpl (LDS variant).
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
David Stuttard70e8bc12017-06-22 16:29:22 +00001386 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1387
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001388 AMDGPUOperand::Ptr defaultDLC() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001389 AMDGPUOperand::Ptr defaultGLC() const;
1390 AMDGPUOperand::Ptr defaultSLC() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001391
Artem Tamazov54bfd542016-10-31 16:07:39 +00001392 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1393 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +00001394 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00001395 AMDGPUOperand::Ptr defaultFlatOffset() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001396
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001397 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1398
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001399 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1400 OptionalImmIndexMap &OptionalIdx);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00001401 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001402 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001403 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001404
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00001405 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1406
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001407 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1408 bool IsAtomic = false);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001409 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +00001410
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001411 OperandMatchResultTy parseDim(OperandVector &Operands);
Stanislav Mekhanoshin245b5ba2019-06-12 18:02:41 +00001412 OperandMatchResultTy parseDPP8(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00001413 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +00001414 AMDGPUOperand::Ptr defaultRowMask() const;
1415 AMDGPUOperand::Ptr defaultBankMask() const;
1416 AMDGPUOperand::Ptr defaultBoundCtrl() const;
Stanislav Mekhanoshin245b5ba2019-06-12 18:02:41 +00001417 AMDGPUOperand::Ptr defaultFI() const;
1418 void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8 = false);
  // DPP8 conversion: same as cvtDPP but with IsDPP8 = true.
  void cvtDPP8(MCInst &Inst, const OperandVector &Operands) { cvtDPP(Inst, Operands, true); }
Sam Kolton3025e7f2016-04-26 13:33:56 +00001420
Sam Kolton05ef1c92016-06-03 10:27:37 +00001421 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1422 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001423 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +00001424 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1425 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Koltonf7659d712017-05-23 10:08:55 +00001426 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
Dmitry Preobrazhensky7d325fe2019-10-18 13:31:53 +00001427 void cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +00001428 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1429 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Dmitry Preobrazhensky7d325fe2019-10-18 13:31:53 +00001430 uint64_t BasicInstType,
1431 bool SkipDstVcc = false,
1432 bool SkipSrcVcc = false);
David Stuttard20ea21c2019-03-12 09:52:58 +00001433
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00001434 AMDGPUOperand::Ptr defaultBLGP() const;
1435 AMDGPUOperand::Ptr defaultCBSZ() const;
1436 AMDGPUOperand::Ptr defaultABID() const;
1437
David Stuttard20ea21c2019-03-12 09:52:58 +00001438 OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
1439 AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001440};
1441
// Table entry describing one optional instruction operand recognized by the
// parser. Field order is part of aggregate-initialization at the use sites —
// do not reorder.
struct OptionalOperand {
  const char *Name;                 // Operand keyword as written in assembly.
  AMDGPUOperand::ImmTy Type;        // Immediate kind the operand maps onto.
  bool IsBit;                       // True for single-bit flag operands.
  // Optional hook to convert/validate the parsed value in place —
  // NOTE(review): presumably may be null when no conversion is needed;
  // confirm at the lookup code.
  bool (*ConvertResult)(int64_t&);
};
1448
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001449} // end anonymous namespace
1450
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001451// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001452static const fltSemantics *getFltSemantics(unsigned Size) {
1453 switch (Size) {
1454 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001455 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001456 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001457 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001458 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001459 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001460 default:
1461 llvm_unreachable("unsupported fp type");
1462 }
1463}
1464
Matt Arsenault4bd72362016-12-10 00:39:12 +00001465static const fltSemantics *getFltSemantics(MVT VT) {
1466 return getFltSemantics(VT.getSizeInBits() / 8);
1467}
1468
// Return the IEEE-754 semantics matching the encoded width of the given
// machine-operand type (from the OperandType field of MCOperandInfo).
static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  // 32-bit operand types map to single precision.
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    return &APFloat::IEEEsingle();
  // 64-bit operand types map to double precision.
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return &APFloat::IEEEdouble();
  // 16-bit (and packed 16x2) operand types map to half precision.
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
1500
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001501//===----------------------------------------------------------------------===//
1502// Operand
1503//===----------------------------------------------------------------------===//
1504
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001505static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1506 bool Lost;
1507
1508 // Convert literal to single precision
1509 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1510 APFloat::rmNearestTiesToEven,
1511 &Lost);
1512 // We allow precision lost but not overflow or underflow
1513 if (Status != APFloat::opOK &&
1514 Lost &&
1515 ((Status & APFloat::opOverflow) != 0 ||
1516 (Status & APFloat::opUnderflow) != 0)) {
1517 return false;
1518 }
1519
1520 return true;
1521}
1522
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001523static bool isSafeTruncation(int64_t Val, unsigned Size) {
1524 return isUIntN(Size, Val) || isIntN(Size, Val);
1525}
1526
// Return true if this immediate operand can be encoded as an inline constant
// for an operand of machine type \p type (fp and int literal tokens are
// handled separately; 16/32/64-bit widths each have their own inline set).
bool AMDGPUOperand::isInlinableImm(MVT type) const {

  // This is a hack to enable named inline values like
  // shared_base with both 32-bit and 64-bit operands.
  // Note that these values are defined as
  // 32-bit operands only.
  if (isInlineValue()) {
    return true;
  }

  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // For narrower operands the fp literal must first convert losslessly
    // (precision loss allowed, overflow/underflow not).
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // Narrow int literals must fit the operand width (signed or unsigned).
  if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) {
    return false;
  }

  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1589
// Return true if this immediate may be encoded as a (non-inline) literal
// for an operand of machine type \p type.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    // 64-bit literals are encoded in 32 bits, so check against 32.
    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isSafeTruncation(Imm.Val, Size);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept this literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // We allow fp literals with f16x2 operands assuming that the specified
  // literal goes into the lower half and the upper half is zero. We also
  // require that the literal may be losslesly converted to f16.
  MVT ExpectedType = (type == MVT::v2f16)? MVT::f16 :
                     (type == MVT::v2i16)? MVT::i16 : type;

  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, ExpectedType);
}
1636
1637bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001638 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001639}
1640
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001641bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001642 if (AsmParser->isVI())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001643 return isVReg32();
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +00001644 else if (AsmParser->isGFX9() || AsmParser->isGFX10())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001645 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001646 else
1647 return false;
1648}
1649
// SDWA operand predicate specialized for the f16 element type.
bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}
1653
// SDWA operand predicate specialized for the f32 element type.
bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}
1657
// SDWA operand predicate specialized for the i16 element type.
bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}
1661
// SDWA operand predicate specialized for the i32 element type.
bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}
1665
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001666bool AMDGPUOperand::isBoolReg() const {
Dmitry Preobrazhensky5e1dd022019-07-24 16:50:17 +00001667 return (AsmParser->getFeatureBits()[AMDGPU::FeatureWavefrontSize64] && isSCSrcB64()) ||
1668 (AsmParser->getFeatureBits()[AMDGPU::FeatureWavefrontSize32] && isSCSrcB32());
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00001669}
1670
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001671uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1672{
1673 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1674 assert(Size == 2 || Size == 4 || Size == 8);
1675
1676 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1677
1678 if (Imm.Mods.Abs) {
1679 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001680 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001681 if (Imm.Mods.Neg) {
1682 Val ^= FpSignMask;
1683 }
1684
1685 return Val;
1686}
1687
1688void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001689 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1690 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001691 addLiteralImmOperand(Inst, Imm.Val,
1692 ApplyModifiers &
1693 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001694 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001695 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1696 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001697 }
1698}
1699
// Encode Val as the literal/inline-constant operand of Inst according to the
// OperandType of the slot being filled. Fp literal tokens and int literal
// tokens follow different conversion rules; validity was established earlier
// by isInlinableImm()/isLiteralImm().
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // Fp literal tokens are stored as a double's bit pattern regardless of
    // the destination operand size.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of the double are encodable.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    case AMDGPU::OPERAND_REG_IMM_V2INT16:
    case AMDGPU::OPERAND_REG_IMM_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    if (isSafeTruncation(Val, 32) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: emit as a truncated 32-bit literal.
    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: only the low 32 bits are encodable.
    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    if (isSafeTruncation(Val, 16) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: emit as a truncated 16-bit literal.
    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    // Packed 16x2 inline-only operand types: validity was checked earlier.
    assert(isSafeTruncation(Val, 16));
    assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                        AsmParser->hasInv2PiInlineImm()));

    Inst.addOperand(MCOperand::createImm(Val));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1846
Matt Arsenault4bd72362016-12-10 00:39:12 +00001847template <unsigned Bitwidth>
1848void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001849 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001850
1851 if (!Imm.IsFPImm) {
1852 // We got int literal token.
1853 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1854 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001855 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001856
1857 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001858 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001859 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1860 APFloat::rmNearestTiesToEven, &Lost);
1861 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001862}
1863
1864void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1865 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1866}
1867
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001868static bool isInlineValue(unsigned Reg) {
1869 switch (Reg) {
1870 case AMDGPU::SRC_SHARED_BASE:
1871 case AMDGPU::SRC_SHARED_LIMIT:
1872 case AMDGPU::SRC_PRIVATE_BASE:
1873 case AMDGPU::SRC_PRIVATE_LIMIT:
1874 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
1875 return true;
Dmitry Preobrazhensky9111f352019-06-03 13:51:24 +00001876 case AMDGPU::SRC_VCCZ:
1877 case AMDGPU::SRC_EXECZ:
1878 case AMDGPU::SRC_SCC:
1879 return true;
Dmitry Preobrazhensky9c68edd2019-09-02 13:42:25 +00001880 case AMDGPU::SGPR_NULL:
1881 return true;
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001882 default:
1883 return false;
1884 }
1885}
1886
// An operand is an inline value if it is a register operand naming one
// of the special inline-value registers (checked by the file-local
// ::isInlineValue). isRegKind() is tested first so getReg() is only
// called on register operands.
bool AMDGPUOperand::isInlineValue() const {
  return isRegKind() && ::isInlineValue(getReg());
}
1890
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001891//===----------------------------------------------------------------------===//
1892// AsmParser
1893//===----------------------------------------------------------------------===//
1894
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001895static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1896 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001897 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001898 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001899 case 1: return AMDGPU::VGPR_32RegClassID;
1900 case 2: return AMDGPU::VReg_64RegClassID;
1901 case 3: return AMDGPU::VReg_96RegClassID;
1902 case 4: return AMDGPU::VReg_128RegClassID;
Stanislav Mekhanoshin5cdacea2019-07-24 16:21:18 +00001903 case 5: return AMDGPU::VReg_160RegClassID;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001904 case 8: return AMDGPU::VReg_256RegClassID;
1905 case 16: return AMDGPU::VReg_512RegClassID;
Stanislav Mekhanoshin5cdacea2019-07-24 16:21:18 +00001906 case 32: return AMDGPU::VReg_1024RegClassID;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001907 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001908 } else if (Is == IS_TTMP) {
1909 switch (RegWidth) {
1910 default: return -1;
1911 case 1: return AMDGPU::TTMP_32RegClassID;
1912 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001913 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001914 case 8: return AMDGPU::TTMP_256RegClassID;
1915 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001916 }
1917 } else if (Is == IS_SGPR) {
1918 switch (RegWidth) {
1919 default: return -1;
1920 case 1: return AMDGPU::SGPR_32RegClassID;
1921 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001922 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001923 case 8: return AMDGPU::SGPR_256RegClassID;
1924 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001925 }
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00001926 } else if (Is == IS_AGPR) {
1927 switch (RegWidth) {
1928 default: return -1;
1929 case 1: return AMDGPU::AGPR_32RegClassID;
1930 case 2: return AMDGPU::AReg_64RegClassID;
1931 case 4: return AMDGPU::AReg_128RegClassID;
1932 case 16: return AMDGPU::AReg_512RegClassID;
1933 case 32: return AMDGPU::AReg_1024RegClassID;
1934 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001935 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001936 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001937}
1938
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001939static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001940 return StringSwitch<unsigned>(RegName)
1941 .Case("exec", AMDGPU::EXEC)
1942 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001943 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001944 .Case("xnack_mask", AMDGPU::XNACK_MASK)
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001945 .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
1946 .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
1947 .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
1948 .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
1949 .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
1950 .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
1951 .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
1952 .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
1953 .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
1954 .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00001955 .Case("lds_direct", AMDGPU::LDS_DIRECT)
1956 .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001957 .Case("m0", AMDGPU::M0)
Dmitry Preobrazhensky9111f352019-06-03 13:51:24 +00001958 .Case("vccz", AMDGPU::SRC_VCCZ)
1959 .Case("src_vccz", AMDGPU::SRC_VCCZ)
1960 .Case("execz", AMDGPU::SRC_EXECZ)
1961 .Case("src_execz", AMDGPU::SRC_EXECZ)
1962 .Case("scc", AMDGPU::SRC_SCC)
1963 .Case("src_scc", AMDGPU::SRC_SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001964 .Case("tba", AMDGPU::TBA)
1965 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001966 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1967 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001968 .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
1969 .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001970 .Case("vcc_lo", AMDGPU::VCC_LO)
1971 .Case("vcc_hi", AMDGPU::VCC_HI)
1972 .Case("exec_lo", AMDGPU::EXEC_LO)
1973 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001974 .Case("tma_lo", AMDGPU::TMA_LO)
1975 .Case("tma_hi", AMDGPU::TMA_HI)
1976 .Case("tba_lo", AMDGPU::TBA_LO)
1977 .Case("tba_hi", AMDGPU::TBA_HI)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00001978 .Case("null", AMDGPU::SGPR_NULL)
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00001979 .Default(AMDGPU::NoRegister);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001980}
1981
Eugene Zelenko66203762017-01-21 00:53:49 +00001982bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1983 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001984 auto R = parseRegister();
1985 if (!R) return true;
1986 assert(R->isReg());
1987 RegNo = R->getReg();
1988 StartLoc = R->getStartLoc();
1989 EndLoc = R->getEndLoc();
1990 return false;
1991}
1992
Eugene Zelenko66203762017-01-21 00:53:49 +00001993bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00001994 RegisterKind RegKind, unsigned Reg1) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001995 switch (RegKind) {
1996 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001997 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1998 Reg = AMDGPU::EXEC;
1999 RegWidth = 2;
2000 return true;
2001 }
2002 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
2003 Reg = AMDGPU::FLAT_SCR;
2004 RegWidth = 2;
2005 return true;
2006 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00002007 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
2008 Reg = AMDGPU::XNACK_MASK;
2009 RegWidth = 2;
2010 return true;
2011 }
Eugene Zelenko66203762017-01-21 00:53:49 +00002012 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
2013 Reg = AMDGPU::VCC;
2014 RegWidth = 2;
2015 return true;
2016 }
2017 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
2018 Reg = AMDGPU::TBA;
2019 RegWidth = 2;
2020 return true;
2021 }
2022 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
2023 Reg = AMDGPU::TMA;
2024 RegWidth = 2;
2025 return true;
2026 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002027 return false;
2028 case IS_VGPR:
2029 case IS_SGPR:
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00002030 case IS_AGPR:
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002031 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00002032 if (Reg1 != Reg + RegWidth) {
2033 return false;
2034 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002035 RegWidth++;
2036 return true;
2037 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00002038 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002039 }
2040}
2041
// Name prefix and kind of a "regular" (numbered) register family,
// e.g. {"v", IS_VGPR} for v0, v1, v[0:1], ...
struct RegInfo {
  StringLiteral Name;
  RegisterKind Kind;
};
2046
// Recognized regular register name prefixes. Lookup is by prefix (see
// getRegularRegInfo), so the longer "acc" entry must precede "a" to be
// matched first.
static constexpr RegInfo RegularRegisters[] = {
  {{"v"},    IS_VGPR},
  {{"s"},    IS_SGPR},
  {{"ttmp"}, IS_TTMP},
  {{"acc"},  IS_AGPR},
  {{"a"},    IS_AGPR},
};
2054
2055static bool isRegularReg(RegisterKind Kind) {
2056 return Kind == IS_VGPR ||
2057 Kind == IS_SGPR ||
2058 Kind == IS_TTMP ||
2059 Kind == IS_AGPR;
2060}
2061
2062static const RegInfo* getRegularRegInfo(StringRef Str) {
2063 for (const RegInfo &Reg : RegularRegisters)
2064 if (Str.startswith(Reg.Name))
2065 return &Reg;
2066 return nullptr;
2067}
2068
// Parse Str as a base-10 register index into Num; true on success.
static bool getRegNum(StringRef Str, unsigned& Num) {
  return !Str.getAsInteger(10, Num);
}
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002072
2073bool
2074AMDGPUAsmParser::isRegister(const AsmToken &Token,
2075 const AsmToken &NextToken) const {
2076
2077 // A list of consecutive registers: [s0,s1,s2,s3]
2078 if (Token.is(AsmToken::LBrac))
2079 return true;
2080
2081 if (!Token.is(AsmToken::Identifier))
2082 return false;
2083
2084 // A single register like s0 or a range of registers like s[0:1]
2085
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002086 StringRef Str = Token.getString();
2087 const RegInfo *Reg = getRegularRegInfo(Str);
2088 if (Reg) {
2089 StringRef RegName = Reg->Name;
2090 StringRef RegSuffix = Str.substr(RegName.size());
2091 if (!RegSuffix.empty()) {
2092 unsigned Num;
2093 // A single register with an index: rXX
2094 if (getRegNum(RegSuffix, Num))
2095 return true;
2096 } else {
2097 // A range of registers: r[XX:YY].
2098 if (NextToken.is(AsmToken::LBrac))
2099 return true;
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002100 }
2101 }
2102
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002103 return getSpecialRegForName(Str) != AMDGPU::NoRegister;
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002104}
2105
// Convenience overload: test the current token (with one token of
// lookahead) using the two-token isRegister above.
bool
AMDGPUAsmParser::isRegister()
{
  return isRegister(getToken(), peekToken());
}
2111
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002112unsigned
2113AMDGPUAsmParser::getRegularReg(RegisterKind RegKind,
2114 unsigned RegNum,
2115 unsigned RegWidth) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002116
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002117 assert(isRegularReg(RegKind));
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002118
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002119 unsigned AlignSize = 1;
2120 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
2121 // SGPR and TTMP registers must be aligned.
2122 // Max required alignment is 4 dwords.
2123 AlignSize = std::min(RegWidth, 4u);
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002124 }
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002125
2126 if (RegNum % AlignSize != 0)
2127 return AMDGPU::NoRegister;
2128
2129 unsigned RegIdx = RegNum / AlignSize;
2130 int RCID = getRegClass(RegKind, RegWidth);
2131 if (RCID == -1)
2132 return AMDGPU::NoRegister;
2133
2134 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2135 const MCRegisterClass RC = TRI->getRegClass(RCID);
2136 if (RegIdx >= RC.getNumRegs())
2137 return AMDGPU::NoRegister;
2138
2139 return RC.getRegister(RegIdx);
2140}
2141
2142bool
2143AMDGPUAsmParser::ParseRegRange(unsigned& Num, unsigned& Width) {
2144 int64_t RegLo, RegHi;
2145 if (!trySkipToken(AsmToken::LBrac))
2146 return false;
2147
2148 if (!parseExpr(RegLo))
2149 return false;
2150
2151 if (trySkipToken(AsmToken::Colon)) {
2152 if (!parseExpr(RegHi))
2153 return false;
2154 } else {
2155 RegHi = RegLo;
2156 }
2157
2158 if (!trySkipToken(AsmToken::RBrac))
2159 return false;
2160
2161 if (!isUInt<32>(RegLo) || !isUInt<32>(RegHi) || RegLo > RegHi)
2162 return false;
2163
2164 Num = static_cast<unsigned>(RegLo);
2165 Width = (RegHi - RegLo) + 1;
2166 return true;
2167}
2168
2169unsigned
2170AMDGPUAsmParser::ParseSpecialReg(RegisterKind &RegKind,
2171 unsigned &RegNum,
2172 unsigned &RegWidth) {
2173 assert(isToken(AsmToken::Identifier));
2174 unsigned Reg = getSpecialRegForName(getTokenStr());
2175 if (Reg) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002176 RegNum = 0;
2177 RegWidth = 1;
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002178 RegKind = IS_SPECIAL;
2179 lex(); // skip register name
2180 }
2181 return Reg;
2182}
2183
2184unsigned
2185AMDGPUAsmParser::ParseRegularReg(RegisterKind &RegKind,
2186 unsigned &RegNum,
2187 unsigned &RegWidth) {
2188 assert(isToken(AsmToken::Identifier));
2189 StringRef RegName = getTokenStr();
2190
2191 const RegInfo *RI = getRegularRegInfo(RegName);
2192 if (!RI)
2193 return AMDGPU::NoRegister;
2194 lex(); // skip register name
2195
2196 RegKind = RI->Kind;
2197 StringRef RegSuffix = RegName.substr(RI->Name.size());
2198 if (!RegSuffix.empty()) {
2199 // Single 32-bit register: vXX.
2200 if (!getRegNum(RegSuffix, RegNum))
2201 return AMDGPU::NoRegister;
2202 RegWidth = 1;
2203 } else {
2204 // Range of registers: v[XX:YY]. ":YY" is optional.
2205 if (!ParseRegRange(RegNum, RegWidth))
2206 return AMDGPU::NoRegister;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002207 }
2208
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002209 return getRegularReg(RegKind, RegNum, RegWidth);
2210}
2211
2212unsigned
2213AMDGPUAsmParser::ParseRegList(RegisterKind &RegKind,
2214 unsigned &RegNum,
2215 unsigned &RegWidth) {
2216 unsigned Reg = AMDGPU::NoRegister;
2217
2218 if (!trySkipToken(AsmToken::LBrac))
2219 return AMDGPU::NoRegister;
2220
2221 // List of consecutive registers, e.g.: [s0,s1,s2,s3]
2222
2223 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
2224 return AMDGPU::NoRegister;
2225 if (RegWidth != 1)
2226 return AMDGPU::NoRegister;
2227
2228 for (; trySkipToken(AsmToken::Comma); ) {
2229 RegisterKind NextRegKind;
2230 unsigned NextReg, NextRegNum, NextRegWidth;
2231
2232 if (!ParseAMDGPURegister(NextRegKind, NextReg, NextRegNum, NextRegWidth))
2233 return AMDGPU::NoRegister;
2234 if (NextRegWidth != 1)
2235 return AMDGPU::NoRegister;
2236 if (NextRegKind != RegKind)
2237 return AMDGPU::NoRegister;
2238 if (!AddNextRegisterToList(Reg, RegWidth, RegKind, NextReg))
2239 return AMDGPU::NoRegister;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002240 }
2241
Dmitry Preobrazhensky436d5b32019-09-27 15:41:31 +00002242 if (!trySkipToken(AsmToken::RBrac))
2243 return AMDGPU::NoRegister;
2244
2245 if (isRegularReg(RegKind))
2246 Reg = getRegularReg(RegKind, RegNum, RegWidth);
2247
2248 return Reg;
2249}
2250
2251bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind,
2252 unsigned &Reg,
2253 unsigned &RegNum,
2254 unsigned &RegWidth) {
2255 Reg = AMDGPU::NoRegister;
2256
2257 if (isToken(AsmToken::Identifier)) {
2258 Reg = ParseSpecialReg(RegKind, RegNum, RegWidth);
2259 if (Reg == AMDGPU::NoRegister)
2260 Reg = ParseRegularReg(RegKind, RegNum, RegWidth);
2261 } else {
2262 Reg = ParseRegList(RegKind, RegNum, RegWidth);
2263 }
2264
2265 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2266 return Reg != AMDGPU::NoRegister && subtargetHasRegister(*TRI, Reg);
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002267}
2268
Scott Linder1e8c2c72018-06-21 19:38:56 +00002269Optional<StringRef>
2270AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
2271 switch (RegKind) {
2272 case IS_VGPR:
2273 return StringRef(".amdgcn.next_free_vgpr");
2274 case IS_SGPR:
2275 return StringRef(".amdgcn.next_free_sgpr");
2276 default:
2277 return None;
2278 }
2279}
2280
// Create the GPR-count tracking symbol for RegKind and initialize it to
// zero. Must only be called for kinds that have such a symbol.
void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
  auto SymbolName = getGprCountSymbolName(RegKind);
  assert(SymbolName && "initializing invalid register kind");
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
  Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
}
2287
// Raise the .amdgcn.next_free_{v,s}gpr symbol so it covers a use of
// registers [DwordRegIndex, DwordRegIndex + RegWidth). Returns true on
// success; returns false (after emitting a diagnostic) when the symbol
// has been redefined to something that is not an absolute expression.
bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
                                            unsigned DwordRegIndex,
                                            unsigned RegWidth) {
  // Symbols are only defined for GCN targets
  if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
    return true;

  // Kinds without a tracking symbol are silently accepted.
  auto SymbolName = getGprCountSymbolName(RegKind);
  if (!SymbolName)
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);

  int64_t NewMax = DwordRegIndex + RegWidth - 1;
  int64_t OldCount;

  // Error(...) always returns true, so "return !Error(...)" emits the
  // diagnostic and yields false (failure).
  if (!Sym->isVariable())
    return !Error(getParser().getTok().getLoc(),
                  ".amdgcn.next_free_{v,s}gpr symbols must be variable");
  if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
    return !Error(
        getParser().getTok().getLoc(),
        ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");

  // The recorded count only ever grows.
  if (OldCount <= NewMax)
    Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));

  return true;
}
2316
// Parse a register reference and wrap it in an AMDGPUOperand, updating
// GPR usage tracking as a side effect. Returns nullptr (after emitting
// an error) when the tokens do not form a valid register.
std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    //FIXME: improve error messages (bug 41303).
    Error(StartLoc, "not a valid operand.");
    return nullptr;
  }
  // Code-object-v3 tracks GPR usage via assembler symbols; older
  // targets track it through KernelScope.
  if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
    if (!updateGprCountSymbols(RegKind, RegNum, RegWidth))
      return nullptr;
  } else
    KernelScope.usesRegister(RegKind, RegNum, RegWidth);
  return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc);
}
2336
Alex Bradbury58eba092016-11-01 16:32:05 +00002337OperandMatchResultTy
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002338AMDGPUAsmParser::parseImm(OperandVector &Operands, bool HasSP3AbsModifier) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002339 // TODO: add syntactic sugar for 1/(2*PI)
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002340
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002341 assert(!isRegister());
2342 assert(!isModifier());
2343
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002344 const auto& Tok = getToken();
2345 const auto& NextTok = peekToken();
2346 bool IsReal = Tok.is(AsmToken::Real);
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002347 SMLoc S = getLoc();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002348 bool Negate = false;
2349
2350 if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
2351 lex();
2352 IsReal = true;
2353 Negate = true;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002354 }
2355
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002356 if (IsReal) {
2357 // Floating-point expressions are not supported.
2358 // Can only allow floating-point literals with an
2359 // optional sign.
2360
2361 StringRef Num = getTokenStr();
2362 lex();
2363
2364 APFloat RealVal(APFloat::IEEEdouble());
2365 auto roundMode = APFloat::rmNearestTiesToEven;
2366 if (RealVal.convertFromString(Num, roundMode) == APFloat::opInvalidOp) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00002367 return MatchOperand_ParseFail;
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002368 }
2369 if (Negate)
2370 RealVal.changeSign();
2371
2372 Operands.push_back(
2373 AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
2374 AMDGPUOperand::ImmTyNone, true));
2375
2376 return MatchOperand_Success;
2377
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002378 } else {
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002379 int64_t IntVal;
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002380 const MCExpr *Expr;
2381 SMLoc S = getLoc();
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002382
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002383 if (HasSP3AbsModifier) {
2384 // This is a workaround for handling expressions
2385 // as arguments of SP3 'abs' modifier, for example:
2386 // |1.0|
2387 // |-1|
2388 // |1+x|
2389 // This syntax is not compatible with syntax of standard
2390 // MC expressions (due to the trailing '|').
2391 SMLoc EndLoc;
2392 if (getParser().parsePrimaryExpr(Expr, EndLoc))
2393 return MatchOperand_ParseFail;
2394 } else {
2395 if (Parser.parseExpression(Expr))
2396 return MatchOperand_ParseFail;
2397 }
2398
2399 if (Expr->evaluateAsAbsolute(IntVal)) {
2400 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
2401 } else {
2402 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
2403 }
2404
Sam Kolton1bdcef72016-05-23 09:59:02 +00002405 return MatchOperand_Success;
2406 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00002407
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002408 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002409}
2410
Alex Bradbury58eba092016-11-01 16:32:05 +00002411OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00002412AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002413 if (!isRegister())
2414 return MatchOperand_NoMatch;
2415
Sam Kolton1bdcef72016-05-23 09:59:02 +00002416 if (auto R = parseRegister()) {
2417 assert(R->isReg());
Sam Kolton1bdcef72016-05-23 09:59:02 +00002418 Operands.push_back(std::move(R));
2419 return MatchOperand_Success;
2420 }
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002421 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002422}
2423
Alex Bradbury58eba092016-11-01 16:32:05 +00002424OperandMatchResultTy
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002425AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002426 auto res = parseReg(Operands);
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00002427 if (res != MatchOperand_NoMatch) {
2428 return res;
2429 } else if (isModifier()) {
2430 return MatchOperand_NoMatch;
2431 } else {
2432 return parseImm(Operands, HasSP3AbsMod);
2433 }
2434}
2435
2436bool
2437AMDGPUAsmParser::isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
2438 if (Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::LParen)) {
2439 const auto &str = Token.getString();
2440 return str == "abs" || str == "neg" || str == "sext";
2441 }
2442 return false;
2443}
2444
// True when Token/NextToken look like an opcode modifier carrying a
// value, i.e. "name:".
bool
AMDGPUAsmParser::isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const {
  return Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::Colon);
}
2449
// True when the tokens start an operand modifier: a named modifier
// ("abs(", "neg(", "sext(") or the SP3 '|' abs spelling.
bool
AMDGPUAsmParser::isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
  return isNamedOperandModifier(Token, NextToken) || Token.is(AsmToken::Pipe);
}
2454
// True when the tokens start either a register or an operand modifier;
// used to decide whether a leading '-' is an SP3 'neg'.
bool
AMDGPUAsmParser::isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
  return isRegister(Token, NextToken) || isOperandModifier(Token, NextToken);
}
2459
2460// Check if this is an operand modifier or an opcode modifier
2461// which may look like an expression but it is not. We should
2462// avoid parsing these modifiers as expressions. Currently
2463// recognized sequences are:
2464// |...|
2465// abs(...)
2466// neg(...)
2467// sext(...)
2468// -reg
2469// -|...|
2470// -abs(...)
2471// name:...
2472// Note that simple opcode modifiers like 'gds' may be parsed as
2473// expressions; this is a special case. See getExpressionAsToken.
2474//
2475bool
2476AMDGPUAsmParser::isModifier() {
2477
2478 AsmToken Tok = getToken();
2479 AsmToken NextToken[2];
2480 peekTokens(NextToken);
2481
2482 return isOperandModifier(Tok, NextToken[0]) ||
2483 (Tok.is(AsmToken::Minus) && isRegOrOperandModifier(NextToken[0], NextToken[1])) ||
2484 isOpcodeModifierWithVal(Tok, NextToken[0]);
Sam Kolton9772eb32017-01-11 11:46:30 +00002485}
2486
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002487// Check if the current token is an SP3 'neg' modifier.
2488// Currently this modifier is allowed in the following context:
2489//
2490// 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]".
2491// 2. Before an 'abs' modifier: -abs(...)
2492// 3. Before an SP3 'abs' modifier: -|...|
2493//
2494// In all other cases "-" is handled as a part
2495// of an expression that follows the sign.
2496//
2497// Note: When "-" is followed by an integer literal,
2498// this is interpreted as integer negation rather
2499// than a floating-point NEG modifier applied to N.
2500// Beside being contr-intuitive, such use of floating-point
2501// NEG modifier would have resulted in different meaning
2502// of integer literals used with VOP1/2/C and VOP3,
2503// for example:
2504// v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
2505// v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
2506// Negative fp literals with preceding "-" are
2507// handled likewise for unifomtity
2508//
2509bool
2510AMDGPUAsmParser::parseSP3NegModifier() {
2511
2512 AsmToken NextToken[2];
2513 peekTokens(NextToken);
2514
2515 if (isToken(AsmToken::Minus) &&
2516 (isRegister(NextToken[0], NextToken[1]) ||
2517 NextToken[0].is(AsmToken::Pipe) ||
2518 isId(NextToken[0], "abs"))) {
2519 lex();
2520 return true;
2521 }
2522
2523 return false;
2524}
2525
// Parse an operand that may carry FP input modifiers in named form
// (neg(...), abs(...)) or SP3 form (-x, |x|). When AllowImm is false
// only registers are accepted. Mixing the named and SP3 spellings of
// the same modifier is rejected.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Neg, SP3Neg;
  bool Abs, SP3Abs;
  SMLoc Loc;

  // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
  if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) {
    Error(getLoc(), "invalid syntax, expected 'neg' modifier");
    return MatchOperand_ParseFail;
  }

  SP3Neg = parseSP3NegModifier();

  Loc = getLoc();
  Neg = trySkipId("neg");
  // 'neg(' after an SP3 '-' would be a double negation; reject it.
  if (Neg && SP3Neg) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }
  if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg"))
    return MatchOperand_ParseFail;

  Abs = trySkipId("abs");
  if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs"))
    return MatchOperand_ParseFail;

  Loc = getLoc();
  SP3Abs = trySkipToken(AsmToken::Pipe);
  // 'abs(' followed by '|' mixes the two abs spellings; reject it.
  if (Abs && SP3Abs) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    // An SP3 'abs' changes how the inner expression is terminated.
    Res = parseRegOrImm(Operands, SP3Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    // Once any modifier was consumed, failing to parse the operand is a
    // hard error rather than a plain no-match.
    return (SP3Neg || Neg || SP3Abs || Abs)? MatchOperand_ParseFail : Res;
  }

  // Consume the matching closing delimiters, innermost first.
  if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar"))
    return MatchOperand_ParseFail;
  if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;
  if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;

  AMDGPUOperand::Modifiers Mods;
  Mods.Abs = Abs || SP3Abs;
  Mods.Neg = Neg || SP3Neg;

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    // Modifiers cannot be attached to an unresolved expression operand.
    if (Op.isExpr()) {
      Error(Op.getStartLoc(), "expected an absolute expression");
      return MatchOperand_ParseFail;
    }
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
2592
// Parse an operand that may carry the integer 'sext(...)' input
// modifier. When AllowImm is false only registers are accepted.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = trySkipId("sext");
  if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext"))
    return MatchOperand_ParseFail;

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    // Once 'sext(' was consumed, failing to parse the operand is a hard
    // error rather than a plain no-match.
    return Sext? MatchOperand_ParseFail : Res;
  }

  if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;

  AMDGPUOperand::Modifiers Mods;
  Mods.Sext = Sext;

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    // Modifiers cannot be attached to an unresolved expression operand.
    if (Op.isExpr()) {
      Error(Op.getStartLoc(), "expected an absolute expression");
      return MatchOperand_ParseFail;
    }
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002627
Sam Kolton9772eb32017-01-11 11:46:30 +00002628OperandMatchResultTy
2629AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2630 return parseRegOrImmWithFPInputMods(Operands, false);
2631}
2632
// Parse a register operand (immediates disallowed) with an optional
// integer input modifier (sext).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
2637
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002638OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002639 auto Loc = getLoc();
2640 if (trySkipId("off")) {
2641 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc,
2642 AMDGPUOperand::ImmTyOff, false));
2643 return MatchOperand_Success;
2644 }
2645
2646 if (!isRegister())
2647 return MatchOperand_NoMatch;
2648
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002649 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2650 if (Reg) {
2651 Operands.push_back(std::move(Reg));
2652 return MatchOperand_Success;
2653 }
2654
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002655 return MatchOperand_ParseFail;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002656
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002657}
2658
Tom Stellard45bb48e2015-06-13 03:28:10 +00002659unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002660 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2661
2662 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00002663 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2664 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2665 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00002666 return Match_InvalidOperand;
2667
Tom Stellard88e0b252015-10-06 15:57:53 +00002668 if ((TSFlags & SIInstrFlags::VOP3) &&
2669 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2670 getForcedEncodingSize() != 64)
2671 return Match_PreferE32;
2672
Sam Koltona568e3d2016-12-22 12:57:41 +00002673 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2674 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002675 // v_mac_f32/16 allow only dst_sel == DWORD;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002676 auto OpNum =
2677 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
Sam Koltona3ec5c12016-10-07 14:46:06 +00002678 const auto &Op = Inst.getOperand(OpNum);
2679 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2680 return Match_InvalidOperand;
2681 }
2682 }
2683
Tom Stellard45bb48e2015-06-13 03:28:10 +00002684 return Match_Success;
2685}
2686
Matt Arsenault5f45e782017-01-09 18:44:11 +00002687// What asm variants we should check
2688ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2689 if (getForcedEncodingSize() == 32) {
2690 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2691 return makeArrayRef(Variants);
2692 }
2693
2694 if (isForcedVOP3()) {
2695 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2696 return makeArrayRef(Variants);
2697 }
2698
2699 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002700 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2701 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002702 return makeArrayRef(Variants);
2703 }
2704
2705 if (isForcedDPP()) {
2706 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2707 return makeArrayRef(Variants);
2708 }
2709
2710 static const unsigned Variants[] = {
2711 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002712 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002713 };
2714
2715 return makeArrayRef(Variants);
2716}
2717
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002718unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2719 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2720 const unsigned Num = Desc.getNumImplicitUses();
2721 for (unsigned i = 0; i < Num; ++i) {
2722 unsigned Reg = Desc.ImplicitUses[i];
2723 switch (Reg) {
2724 case AMDGPU::FLAT_SCR:
2725 case AMDGPU::VCC:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00002726 case AMDGPU::VCC_LO:
2727 case AMDGPU::VCC_HI:
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002728 case AMDGPU::M0:
2729 return Reg;
2730 default:
2731 break;
2732 }
2733 }
2734 return AMDGPU::NoRegister;
2735}
2736
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002737// NB: This code is correct only when used to check constant
2738// bus limitations because GFX7 support no f16 inline constants.
2739// Note that there are no cases when a GFX7 opcode violates
2740// constant bus limitations due to the use of an f16 constant.
2741bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2742 unsigned OpIdx) const {
2743 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2744
2745 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2746 return false;
2747 }
2748
2749 const MCOperand &MO = Inst.getOperand(OpIdx);
2750
2751 int64_t Val = MO.getImm();
2752 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2753
2754 switch (OpSize) { // expected operand size
2755 case 8:
2756 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2757 case 4:
2758 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2759 case 2: {
2760 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2761 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00002762 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16 ||
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00002763 OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16 ||
2764 OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2FP16 ||
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00002765 OperandType == AMDGPU::OPERAND_REG_IMM_V2INT16 ||
2766 OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002767 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2768 } else {
2769 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2770 }
2771 }
2772 default:
2773 llvm_unreachable("invalid operand size");
2774 }
2775}
2776
Dmitry Preobrazhenskyfe2ee4c2019-09-02 12:50:05 +00002777unsigned AMDGPUAsmParser::getConstantBusLimit(unsigned Opcode) const {
2778 if (!isGFX10())
2779 return 1;
2780
2781 switch (Opcode) {
2782 // 64-bit shift instructions can use only one scalar value input
2783 case AMDGPU::V_LSHLREV_B64:
2784 case AMDGPU::V_LSHLREV_B64_gfx10:
2785 case AMDGPU::V_LSHL_B64:
2786 case AMDGPU::V_LSHRREV_B64:
2787 case AMDGPU::V_LSHRREV_B64_gfx10:
2788 case AMDGPU::V_LSHR_B64:
2789 case AMDGPU::V_ASHRREV_I64:
2790 case AMDGPU::V_ASHRREV_I64_gfx10:
2791 case AMDGPU::V_ASHR_I64:
2792 return 1;
2793 default:
2794 return 2;
2795 }
2796}
2797
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002798bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2799 const MCOperand &MO = Inst.getOperand(OpIdx);
2800 if (MO.isImm()) {
2801 return !isInlineConstant(Inst, OpIdx);
Dmitry Preobrazhensky4aa90ea2019-09-02 14:19:52 +00002802 } else if (MO.isReg()) {
2803 auto Reg = MO.getReg();
2804 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2805 return isSGPR(mc2PseudoReg(Reg), TRI) && Reg != SGPR_NULL;
2806 } else {
2807 return true;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002808 }
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002809}
2810
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002811bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002812 const unsigned Opcode = Inst.getOpcode();
2813 const MCInstrDesc &Desc = MII.get(Opcode);
2814 unsigned ConstantBusUseCount = 0;
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002815 unsigned NumLiterals = 0;
2816 unsigned LiteralSize;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002817
2818 if (Desc.TSFlags &
2819 (SIInstrFlags::VOPC |
2820 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
Sam Koltonf7659d712017-05-23 10:08:55 +00002821 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2822 SIInstrFlags::SDWA)) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002823 // Check special imm operands (used by madmk, etc)
2824 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2825 ++ConstantBusUseCount;
2826 }
2827
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002828 SmallDenseSet<unsigned> SGPRsUsed;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002829 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2830 if (SGPRUsed != AMDGPU::NoRegister) {
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002831 SGPRsUsed.insert(SGPRUsed);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002832 ++ConstantBusUseCount;
2833 }
2834
2835 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2836 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2837 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2838
2839 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2840
2841 for (int OpIdx : OpIndices) {
2842 if (OpIdx == -1) break;
2843
2844 const MCOperand &MO = Inst.getOperand(OpIdx);
2845 if (usesConstantBus(Inst, OpIdx)) {
2846 if (MO.isReg()) {
2847 const unsigned Reg = mc2PseudoReg(MO.getReg());
2848 // Pairs of registers with a partial intersections like these
2849 // s0, s[0:1]
2850 // flat_scratch_lo, flat_scratch
2851 // flat_scratch_lo, flat_scratch_hi
2852 // are theoretically valid but they are disabled anyway.
2853 // Note that this code mimics SIInstrInfo::verifyInstruction
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002854 if (!SGPRsUsed.count(Reg)) {
2855 SGPRsUsed.insert(Reg);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002856 ++ConstantBusUseCount;
2857 }
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002858 } else { // Expression or a literal
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002859
2860 if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
2861 continue; // special operand like VINTERP attr_chan
2862
2863 // An instruction may use only one literal.
2864 // This has been validated on the previous step.
2865 // See validateVOP3Literal.
2866 // This literal may be used as more than one operand.
2867 // If all these operands are of the same size,
2868 // this literal counts as one scalar value.
2869 // Otherwise it counts as 2 scalar values.
2870 // See "GFX10 Shader Programming", section 3.6.2.3.
2871
2872 unsigned Size = AMDGPU::getOperandSize(Desc, OpIdx);
2873 if (Size < 4) Size = 4;
2874
2875 if (NumLiterals == 0) {
2876 NumLiterals = 1;
2877 LiteralSize = Size;
2878 } else if (LiteralSize != Size) {
2879 NumLiterals = 2;
2880 }
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002881 }
2882 }
2883 }
2884 }
Stanislav Mekhanoshinf2baae02019-05-02 03:47:23 +00002885 ConstantBusUseCount += NumLiterals;
2886
Dmitry Preobrazhenskyfe2ee4c2019-09-02 12:50:05 +00002887 return ConstantBusUseCount <= getConstantBusLimit(Opcode);
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002888}
2889
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002890bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002891 const unsigned Opcode = Inst.getOpcode();
2892 const MCInstrDesc &Desc = MII.get(Opcode);
2893
2894 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2895 if (DstIdx == -1 ||
2896 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2897 return true;
2898 }
2899
2900 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2901
2902 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2903 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2904 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2905
2906 assert(DstIdx != -1);
2907 const MCOperand &Dst = Inst.getOperand(DstIdx);
2908 assert(Dst.isReg());
2909 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2910
2911 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2912
2913 for (int SrcIdx : SrcIndices) {
2914 if (SrcIdx == -1) break;
2915 const MCOperand &Src = Inst.getOperand(SrcIdx);
2916 if (Src.isReg()) {
2917 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2918 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2919 return false;
2920 }
2921 }
2922 }
2923
2924 return true;
2925}
2926
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002927bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2928
2929 const unsigned Opc = Inst.getOpcode();
2930 const MCInstrDesc &Desc = MII.get(Opc);
2931
2932 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2933 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2934 assert(ClampIdx != -1);
2935 return Inst.getOperand(ClampIdx).getImm() == 0;
2936 }
2937
2938 return true;
2939}
2940
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002941bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
2942
2943 const unsigned Opc = Inst.getOpcode();
2944 const MCInstrDesc &Desc = MII.get(Opc);
2945
2946 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2947 return true;
2948
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002949 int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
2950 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2951 int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
2952
2953 assert(VDataIdx != -1);
2954 assert(DMaskIdx != -1);
2955 assert(TFEIdx != -1);
2956
2957 unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
2958 unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
2959 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2960 if (DMask == 0)
2961 DMask = 1;
2962
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002963 unsigned DataSize =
2964 (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
2965 if (hasPackedD16()) {
2966 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2967 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
2968 DataSize = (DataSize + 1) / 2;
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002969 }
2970
2971 return (VDataSize / 4) == DataSize + TFESize;
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002972}
2973
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002974bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst) {
2975 const unsigned Opc = Inst.getOpcode();
2976 const MCInstrDesc &Desc = MII.get(Opc);
2977
2978 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0 || !isGFX10())
2979 return true;
2980
2981 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
2982 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
2983 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
2984 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
2985 int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
2986 int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
2987
2988 assert(VAddr0Idx != -1);
2989 assert(SrsrcIdx != -1);
2990 assert(DimIdx != -1);
2991 assert(SrsrcIdx > VAddr0Idx);
2992
2993 unsigned Dim = Inst.getOperand(DimIdx).getImm();
2994 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
2995 bool IsNSA = SrsrcIdx - VAddr0Idx > 1;
2996 unsigned VAddrSize =
2997 IsNSA ? SrsrcIdx - VAddr0Idx
2998 : AMDGPU::getRegOperandSize(getMRI(), Desc, VAddr0Idx) / 4;
2999
3000 unsigned AddrSize = BaseOpcode->NumExtraArgs +
3001 (BaseOpcode->Gradients ? DimInfo->NumGradients : 0) +
3002 (BaseOpcode->Coordinates ? DimInfo->NumCoords : 0) +
3003 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
3004 if (!IsNSA) {
3005 if (AddrSize > 8)
3006 AddrSize = 16;
3007 else if (AddrSize > 4)
3008 AddrSize = 8;
3009 }
3010
3011 return VAddrSize == AddrSize;
3012}
3013
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00003014bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
3015
3016 const unsigned Opc = Inst.getOpcode();
3017 const MCInstrDesc &Desc = MII.get(Opc);
3018
3019 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
3020 return true;
3021 if (!Desc.mayLoad() || !Desc.mayStore())
3022 return true; // Not atomic
3023
3024 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
3025 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
3026
3027 // This is an incomplete check because image_atomic_cmpswap
3028 // may only use 0x3 and 0xf while other atomic operations
3029 // may use 0x1 and 0x3. However these limitations are
3030 // verified when we check that dmask matches dst size.
3031 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
3032}
3033
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00003034bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
3035
3036 const unsigned Opc = Inst.getOpcode();
3037 const MCInstrDesc &Desc = MII.get(Opc);
3038
3039 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
3040 return true;
3041
3042 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
3043 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
3044
3045 // GATHER4 instructions use dmask in a different fashion compared to
3046 // other MIMG instructions. The only useful DMASK values are
3047 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
3048 // (red,red,red,red) etc.) The ISA document doesn't mention
3049 // this.
3050 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
3051}
3052
Dmitry Preobrazhenskyedd9f702019-11-18 17:23:40 +03003053static bool IsMovrelsSDWAOpcode(const unsigned Opcode)
3054{
3055 switch (Opcode) {
3056 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
3057 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
3058 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
3059 return true;
3060 default:
3061 return false;
3062 }
3063}
3064
3065// movrels* opcodes should only allow VGPRS as src0.
3066// This is specified in .td description for vop1/vop3,
3067// but sdwa is handled differently. See isSDWAOperand.
3068bool AMDGPUAsmParser::validateMovrels(const MCInst &Inst) {
3069
3070 const unsigned Opc = Inst.getOpcode();
3071 const MCInstrDesc &Desc = MII.get(Opc);
3072
3073 if ((Desc.TSFlags & SIInstrFlags::SDWA) == 0 || !IsMovrelsSDWAOpcode(Opc))
3074 return true;
3075
3076 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3077 assert(Src0Idx != -1);
3078
3079 const MCOperand &Src0 = Inst.getOperand(Src0Idx);
3080 if (!Src0.isReg())
3081 return false;
3082
3083 auto Reg = Src0.getReg();
3084 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
3085 return !isSGPR(mc2PseudoReg(Reg), TRI);
3086}
3087
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00003088bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
3089
3090 const unsigned Opc = Inst.getOpcode();
3091 const MCInstrDesc &Desc = MII.get(Opc);
3092
3093 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
3094 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00003095
Nicolai Haehnlef2674312018-06-21 13:36:01 +00003096 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
3097 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
3098 if (isCI() || isSI())
3099 return false;
3100 }
3101
3102 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00003103}
3104
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003105bool AMDGPUAsmParser::validateMIMGDim(const MCInst &Inst) {
3106 const unsigned Opc = Inst.getOpcode();
3107 const MCInstrDesc &Desc = MII.get(Opc);
3108
3109 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
3110 return true;
3111
3112 int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
3113 if (DimIdx < 0)
3114 return true;
3115
3116 long Imm = Inst.getOperand(DimIdx).getImm();
3117 if (Imm < 0 || Imm >= 8)
3118 return false;
3119
3120 return true;
3121}
3122
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003123static bool IsRevOpcode(const unsigned Opcode)
3124{
3125 switch (Opcode) {
3126 case AMDGPU::V_SUBREV_F32_e32:
3127 case AMDGPU::V_SUBREV_F32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003128 case AMDGPU::V_SUBREV_F32_e32_gfx10:
3129 case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003130 case AMDGPU::V_SUBREV_F32_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003131 case AMDGPU::V_SUBREV_F32_e64_gfx10:
3132 case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003133 case AMDGPU::V_SUBREV_F32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003134
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003135 case AMDGPU::V_SUBREV_I32_e32:
3136 case AMDGPU::V_SUBREV_I32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003137 case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7:
3138 case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7:
3139
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003140 case AMDGPU::V_SUBBREV_U32_e32:
3141 case AMDGPU::V_SUBBREV_U32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003142 case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003143 case AMDGPU::V_SUBBREV_U32_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003144 case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003145 case AMDGPU::V_SUBBREV_U32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003146
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003147 case AMDGPU::V_SUBREV_U32_e32:
3148 case AMDGPU::V_SUBREV_U32_e64:
3149 case AMDGPU::V_SUBREV_U32_e32_gfx9:
3150 case AMDGPU::V_SUBREV_U32_e32_vi:
3151 case AMDGPU::V_SUBREV_U32_e64_gfx9:
3152 case AMDGPU::V_SUBREV_U32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003153
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003154 case AMDGPU::V_SUBREV_F16_e32:
3155 case AMDGPU::V_SUBREV_F16_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003156 case AMDGPU::V_SUBREV_F16_e32_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003157 case AMDGPU::V_SUBREV_F16_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003158 case AMDGPU::V_SUBREV_F16_e64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003159 case AMDGPU::V_SUBREV_F16_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003160
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003161 case AMDGPU::V_SUBREV_U16_e32:
3162 case AMDGPU::V_SUBREV_U16_e64:
3163 case AMDGPU::V_SUBREV_U16_e32_vi:
3164 case AMDGPU::V_SUBREV_U16_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003165
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003166 case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003167 case AMDGPU::V_SUBREV_CO_U32_e64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003168 case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003169
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003170 case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
3171 case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003172
3173 case AMDGPU::V_SUBREV_NC_U32_e32_gfx10:
3174 case AMDGPU::V_SUBREV_NC_U32_e64_gfx10:
3175
3176 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
3177 case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10:
3178
3179 case AMDGPU::V_LSHRREV_B32_e32:
3180 case AMDGPU::V_LSHRREV_B32_e64:
3181 case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7:
3182 case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003183 case AMDGPU::V_LSHRREV_B32_e32_vi:
3184 case AMDGPU::V_LSHRREV_B32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003185 case AMDGPU::V_LSHRREV_B32_e32_gfx10:
3186 case AMDGPU::V_LSHRREV_B32_e64_gfx10:
3187
3188 case AMDGPU::V_ASHRREV_I32_e32:
3189 case AMDGPU::V_ASHRREV_I32_e64:
3190 case AMDGPU::V_ASHRREV_I32_e32_gfx10:
3191 case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7:
3192 case AMDGPU::V_ASHRREV_I32_e32_vi:
3193 case AMDGPU::V_ASHRREV_I32_e64_gfx10:
3194 case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7:
3195 case AMDGPU::V_ASHRREV_I32_e64_vi:
3196
3197 case AMDGPU::V_LSHLREV_B32_e32:
3198 case AMDGPU::V_LSHLREV_B32_e64:
3199 case AMDGPU::V_LSHLREV_B32_e32_gfx10:
3200 case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7:
3201 case AMDGPU::V_LSHLREV_B32_e32_vi:
3202 case AMDGPU::V_LSHLREV_B32_e64_gfx10:
3203 case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7:
3204 case AMDGPU::V_LSHLREV_B32_e64_vi:
3205
3206 case AMDGPU::V_LSHLREV_B16_e32:
3207 case AMDGPU::V_LSHLREV_B16_e64:
3208 case AMDGPU::V_LSHLREV_B16_e32_vi:
3209 case AMDGPU::V_LSHLREV_B16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003210 case AMDGPU::V_LSHLREV_B16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003211
3212 case AMDGPU::V_LSHRREV_B16_e32:
3213 case AMDGPU::V_LSHRREV_B16_e64:
3214 case AMDGPU::V_LSHRREV_B16_e32_vi:
3215 case AMDGPU::V_LSHRREV_B16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003216 case AMDGPU::V_LSHRREV_B16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003217
3218 case AMDGPU::V_ASHRREV_I16_e32:
3219 case AMDGPU::V_ASHRREV_I16_e64:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003220 case AMDGPU::V_ASHRREV_I16_e32_vi:
3221 case AMDGPU::V_ASHRREV_I16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003222 case AMDGPU::V_ASHRREV_I16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003223
3224 case AMDGPU::V_LSHLREV_B64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003225 case AMDGPU::V_LSHLREV_B64_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003226 case AMDGPU::V_LSHLREV_B64_vi:
3227
3228 case AMDGPU::V_LSHRREV_B64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003229 case AMDGPU::V_LSHRREV_B64_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003230 case AMDGPU::V_LSHRREV_B64_vi:
3231
3232 case AMDGPU::V_ASHRREV_I64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003233 case AMDGPU::V_ASHRREV_I64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003234 case AMDGPU::V_ASHRREV_I64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003235
3236 case AMDGPU::V_PK_LSHLREV_B16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003237 case AMDGPU::V_PK_LSHLREV_B16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003238 case AMDGPU::V_PK_LSHLREV_B16_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003239
3240 case AMDGPU::V_PK_LSHRREV_B16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003241 case AMDGPU::V_PK_LSHRREV_B16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003242 case AMDGPU::V_PK_LSHRREV_B16_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00003243 case AMDGPU::V_PK_ASHRREV_I16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00003244 case AMDGPU::V_PK_ASHRREV_I16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003245 case AMDGPU::V_PK_ASHRREV_I16_vi:
3246 return true;
3247 default:
3248 return false;
3249 }
3250}
3251
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00003252bool AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
3253
3254 using namespace SIInstrFlags;
3255 const unsigned Opcode = Inst.getOpcode();
3256 const MCInstrDesc &Desc = MII.get(Opcode);
3257
3258 // lds_direct register is defined so that it can be used
3259 // with 9-bit operands only. Ignore encodings which do not accept these.
3260 if ((Desc.TSFlags & (VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA)) == 0)
3261 return true;
3262
3263 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3264 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3265 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3266
3267 const int SrcIndices[] = { Src1Idx, Src2Idx };
3268
3269 // lds_direct cannot be specified as either src1 or src2.
3270 for (int SrcIdx : SrcIndices) {
3271 if (SrcIdx == -1) break;
3272 const MCOperand &Src = Inst.getOperand(SrcIdx);
3273 if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
3274 return false;
3275 }
3276 }
3277
3278 if (Src0Idx == -1)
3279 return true;
3280
3281 const MCOperand &Src = Inst.getOperand(Src0Idx);
3282 if (!Src.isReg() || Src.getReg() != LDS_DIRECT)
3283 return true;
3284
3285 // lds_direct is specified as src0. Check additional limitations.
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00003286 return (Desc.TSFlags & SIInstrFlags::SDWA) == 0 && !IsRevOpcode(Opcode);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00003287}
3288
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00003289SMLoc AMDGPUAsmParser::getFlatOffsetLoc(const OperandVector &Operands) const {
3290 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3291 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3292 if (Op.isFlatOffset())
3293 return Op.getStartLoc();
3294 }
3295 return getLoc();
3296}
3297
// Validate the immediate offset of a FLAT instruction against the
// target's supported width and signedness, emitting a diagnostic at
// the offset's source location on failure.
bool AMDGPUAsmParser::validateFlatOffset(const MCInst &Inst,
                                         const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((TSFlags & SIInstrFlags::FLAT) == 0)
    return true;

  auto Opcode = Inst.getOpcode();
  auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset);
  assert(OpNum != -1);

  const auto &Op = Inst.getOperand(OpNum);
  if (!hasFlatOffsets() && Op.getImm() != 0) {
    Error(getFlatOffsetLoc(Operands),
          "flat offset modifier is not supported on this GPU");
    return false;
  }

  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9.
  // For FLAT segment the offset must be positive;
  // MSB is ignored and forced to zero.
  unsigned OffsetSize = isGFX9() ? 13 : 12;
  if (TSFlags & SIInstrFlags::IsNonFlatSeg) {
    // Global/scratch segment: full signed range.
    if (!isIntN(OffsetSize, Op.getImm())) {
      Error(getFlatOffsetLoc(Operands),
            isGFX9() ? "expected a 13-bit signed offset" :
                       "expected a 12-bit signed offset");
      return false;
    }
  } else {
    // FLAT segment: unsigned, one bit narrower (sign bit forced to 0).
    if (!isUIntN(OffsetSize - 1, Op.getImm())) {
      Error(getFlatOffsetLoc(Operands),
            isGFX9() ? "expected a 12-bit unsigned offset" :
                       "expected an 11-bit unsigned offset");
      return false;
    }
  }

  return true;
}
3337
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00003338bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
3339 unsigned Opcode = Inst.getOpcode();
3340 const MCInstrDesc &Desc = MII.get(Opcode);
3341 if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
3342 return true;
3343
3344 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3345 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3346
3347 const int OpIndices[] = { Src0Idx, Src1Idx };
3348
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003349 unsigned NumExprs = 0;
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00003350 unsigned NumLiterals = 0;
3351 uint32_t LiteralValue;
3352
3353 for (int OpIdx : OpIndices) {
3354 if (OpIdx == -1) break;
3355
3356 const MCOperand &MO = Inst.getOperand(OpIdx);
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003357 // Exclude special imm operands (like that used by s_set_gpr_idx_on)
3358 if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
3359 if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
3360 uint32_t Value = static_cast<uint32_t>(MO.getImm());
3361 if (NumLiterals == 0 || LiteralValue != Value) {
3362 LiteralValue = Value;
3363 ++NumLiterals;
3364 }
3365 } else if (MO.isExpr()) {
3366 ++NumExprs;
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00003367 }
3368 }
3369 }
3370
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003371 return NumLiterals + NumExprs <= 1;
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00003372}
3373
Stanislav Mekhanoshin5f581c92019-06-12 17:52:51 +00003374bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) {
3375 const unsigned Opc = Inst.getOpcode();
3376 if (Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
3377 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10) {
3378 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
3379 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
3380
3381 if (OpSel & ~3)
3382 return false;
3383 }
3384 return true;
3385}
3386
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00003387// Check if VCC register matches wavefront size
3388bool AMDGPUAsmParser::validateVccOperand(unsigned Reg) const {
3389 auto FB = getFeatureBits();
3390 return (FB[AMDGPU::FeatureWavefrontSize64] && Reg == AMDGPU::VCC) ||
3391 (FB[AMDGPU::FeatureWavefrontSize32] && Reg == AMDGPU::VCC_LO);
3392}
3393
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003394// VOP3 literal is only allowed in GFX10+ and only one can be used
3395bool AMDGPUAsmParser::validateVOP3Literal(const MCInst &Inst) const {
3396 unsigned Opcode = Inst.getOpcode();
3397 const MCInstrDesc &Desc = MII.get(Opcode);
3398 if (!(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)))
3399 return true;
3400
3401 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3402 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3403 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3404
3405 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
3406
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003407 unsigned NumExprs = 0;
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003408 unsigned NumLiterals = 0;
3409 uint32_t LiteralValue;
3410
3411 for (int OpIdx : OpIndices) {
3412 if (OpIdx == -1) break;
3413
3414 const MCOperand &MO = Inst.getOperand(OpIdx);
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003415 if (!MO.isImm() && !MO.isExpr())
3416 continue;
3417 if (!AMDGPU::isSISrcOperand(Desc, OpIdx))
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003418 continue;
3419
Stanislav Mekhanoshinb37d6a72019-08-23 22:22:49 +00003420 if (OpIdx == Src2Idx && (Desc.TSFlags & SIInstrFlags::IsMAI) &&
3421 getFeatureBits()[AMDGPU::FeatureMFMAInlineLiteralBug])
3422 return false;
3423
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003424 if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003425 uint32_t Value = static_cast<uint32_t>(MO.getImm());
3426 if (NumLiterals == 0 || LiteralValue != Value) {
3427 LiteralValue = Value;
3428 ++NumLiterals;
3429 }
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003430 } else if (MO.isExpr()) {
3431 ++NumExprs;
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003432 }
3433 }
Dmitry Preobrazhensky6784a3c2019-09-23 15:41:51 +00003434 NumLiterals += NumExprs;
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003435
3436 return !NumLiterals ||
3437 (NumLiterals == 1 && getFeatureBits()[AMDGPU::FeatureVOP3Literal]);
3438}
3439
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00003440bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00003441 const SMLoc &IDLoc,
3442 const OperandVector &Operands) {
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00003443 if (!validateLdsDirect(Inst)) {
3444 Error(IDLoc,
3445 "invalid use of lds_direct");
3446 return false;
3447 }
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00003448 if (!validateSOPLiteral(Inst)) {
3449 Error(IDLoc,
3450 "only one literal operand is allowed");
3451 return false;
3452 }
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00003453 if (!validateVOP3Literal(Inst)) {
3454 Error(IDLoc,
3455 "invalid literal operand");
3456 return false;
3457 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00003458 if (!validateConstantBusLimitations(Inst)) {
3459 Error(IDLoc,
3460 "invalid operand (violates constant bus restrictions)");
3461 return false;
3462 }
3463 if (!validateEarlyClobberLimitations(Inst)) {
3464 Error(IDLoc,
3465 "destination must be different than all sources");
3466 return false;
3467 }
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00003468 if (!validateIntClampSupported(Inst)) {
3469 Error(IDLoc,
3470 "integer clamping is not supported on this GPU");
3471 return false;
3472 }
Stanislav Mekhanoshin5f581c92019-06-12 17:52:51 +00003473 if (!validateOpSel(Inst)) {
3474 Error(IDLoc,
3475 "invalid op_sel operand");
3476 return false;
3477 }
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00003478 // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
3479 if (!validateMIMGD16(Inst)) {
3480 Error(IDLoc,
3481 "d16 modifier is not supported on this GPU");
3482 return false;
3483 }
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003484 if (!validateMIMGDim(Inst)) {
3485 Error(IDLoc, "dim modifier is required on this GPU");
3486 return false;
3487 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00003488 if (!validateMIMGDataSize(Inst)) {
3489 Error(IDLoc,
3490 "image data size does not match dmask and tfe");
3491 return false;
3492 }
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00003493 if (!validateMIMGAddrSize(Inst)) {
3494 Error(IDLoc,
3495 "image address size does not match dim and a16");
3496 return false;
3497 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00003498 if (!validateMIMGAtomicDMask(Inst)) {
3499 Error(IDLoc,
3500 "invalid atomic image dmask");
3501 return false;
3502 }
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00003503 if (!validateMIMGGatherDMask(Inst)) {
3504 Error(IDLoc,
3505 "invalid image_gather dmask: only one bit must be set");
3506 return false;
3507 }
Dmitry Preobrazhenskyedd9f702019-11-18 17:23:40 +03003508 if (!validateMovrels(Inst)) {
3509 Error(IDLoc, "source operand must be a VGPR");
3510 return false;
3511 }
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00003512 if (!validateFlatOffset(Inst, Operands)) {
3513 return false;
3514 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00003515
3516 return true;
3517}
3518
Stanislav Mekhanoshine98944e2019-03-11 17:04:35 +00003519static std::string AMDGPUMnemonicSpellCheck(StringRef S,
3520 const FeatureBitset &FBS,
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00003521 unsigned VariantID = 0);
3522
Tom Stellard45bb48e2015-06-13 03:28:10 +00003523bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3524 OperandVector &Operands,
3525 MCStreamer &Out,
3526 uint64_t &ErrorInfo,
3527 bool MatchingInlineAsm) {
3528 MCInst Inst;
Sam Koltond63d8a72016-09-09 09:37:51 +00003529 unsigned Result = Match_Success;
Matt Arsenault5f45e782017-01-09 18:44:11 +00003530 for (auto Variant : getMatchedVariants()) {
Sam Koltond63d8a72016-09-09 09:37:51 +00003531 uint64_t EI;
3532 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
3533 Variant);
3534 // We order match statuses from least to most specific. We use most specific
3535 // status as resulting
3536 // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
3537 if ((R == Match_Success) ||
3538 (R == Match_PreferE32) ||
3539 (R == Match_MissingFeature && Result != Match_PreferE32) ||
3540 (R == Match_InvalidOperand && Result != Match_MissingFeature
3541 && Result != Match_PreferE32) ||
3542 (R == Match_MnemonicFail && Result != Match_InvalidOperand
3543 && Result != Match_MissingFeature
3544 && Result != Match_PreferE32)) {
3545 Result = R;
3546 ErrorInfo = EI;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003547 }
Sam Koltond63d8a72016-09-09 09:37:51 +00003548 if (R == Match_Success)
3549 break;
3550 }
3551
3552 switch (Result) {
3553 default: break;
3554 case Match_Success:
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00003555 if (!validateInstruction(Inst, IDLoc, Operands)) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00003556 return true;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00003557 }
Sam Koltond63d8a72016-09-09 09:37:51 +00003558 Inst.setLoc(IDLoc);
3559 Out.EmitInstruction(Inst, getSTI());
3560 return false;
3561
3562 case Match_MissingFeature:
3563 return Error(IDLoc, "instruction not supported on this GPU");
3564
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00003565 case Match_MnemonicFail: {
Stanislav Mekhanoshine98944e2019-03-11 17:04:35 +00003566 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00003567 std::string Suggestion = AMDGPUMnemonicSpellCheck(
3568 ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
3569 return Error(IDLoc, "invalid instruction" + Suggestion,
3570 ((AMDGPUOperand &)*Operands[0]).getLocRange());
3571 }
Sam Koltond63d8a72016-09-09 09:37:51 +00003572
3573 case Match_InvalidOperand: {
3574 SMLoc ErrorLoc = IDLoc;
3575 if (ErrorInfo != ~0ULL) {
3576 if (ErrorInfo >= Operands.size()) {
3577 return Error(IDLoc, "too few operands for instruction");
3578 }
3579 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
3580 if (ErrorLoc == SMLoc())
3581 ErrorLoc = IDLoc;
3582 }
3583 return Error(ErrorLoc, "invalid operand for instruction");
3584 }
3585
3586 case Match_PreferE32:
3587 return Error(IDLoc, "internal error: instruction without _e64 suffix "
3588 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +00003589 }
3590 llvm_unreachable("Implement any new match types added!");
3591}
3592
Artem Tamazov25478d82016-12-29 15:41:52 +00003593bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
3594 int64_t Tmp = -1;
3595 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
3596 return true;
3597 }
3598 if (getParser().parseAbsoluteExpression(Tmp)) {
3599 return true;
3600 }
3601 Ret = static_cast<uint32_t>(Tmp);
3602 return false;
3603}
3604
Tom Stellard347ac792015-06-26 21:15:07 +00003605bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
3606 uint32_t &Minor) {
Artem Tamazov25478d82016-12-29 15:41:52 +00003607 if (ParseAsAbsoluteExpression(Major))
Tom Stellard347ac792015-06-26 21:15:07 +00003608 return TokError("invalid major version");
3609
Tom Stellard347ac792015-06-26 21:15:07 +00003610 if (getLexer().isNot(AsmToken::Comma))
3611 return TokError("minor version number required, comma expected");
3612 Lex();
3613
Artem Tamazov25478d82016-12-29 15:41:52 +00003614 if (ParseAsAbsoluteExpression(Minor))
Tom Stellard347ac792015-06-26 21:15:07 +00003615 return TokError("invalid minor version");
3616
Tom Stellard347ac792015-06-26 21:15:07 +00003617 return false;
3618}
3619
Scott Linder1e8c2c72018-06-21 19:38:56 +00003620bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
3621 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
3622 return TokError("directive only supported for amdgcn architecture");
3623
3624 std::string Target;
3625
3626 SMLoc TargetStart = getTok().getLoc();
3627 if (getParser().parseEscapedString(Target))
3628 return true;
3629 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
3630
3631 std::string ExpectedTarget;
3632 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
3633 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
3634
3635 if (Target != ExpectedTargetOS.str())
3636 return getParser().Error(TargetRange.Start, "target must match options",
3637 TargetRange);
3638
3639 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
3640 return false;
3641}
3642
// Emit a "value out of range" diagnostic anchored at Range and propagate the
// parser's error result to the caller.
bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return getParser().Error(Range.Start, "value out of range", Range);
}
3646
// Convert the kernel's register high-water marks into the granulated VGPR and
// SGPR block counts encoded in compute_pgm_rsrc1, applying version-specific
// SGPR accounting rules. Returns true (via OutOfRangeError) on overflow.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
    SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange,
    unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;

  // GFX10+ does not encode an SGPR count.
  if (Version.Major >= 10)
    NumSGPRs = 0;
  else {
    unsigned MaxAddressableNumSGPRs =
        IsaInfo::getAddressableNumSGPRs(&getSTI());

    // On gfx8+ without the SGPR-init bug, check the user count before adding
    // the extra (VCC/flat_scratch/XNACK) SGPRs, which live beyond the
    // addressable range.
    if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    NumSGPRs +=
        IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

    // Older targets (or the init-bug workaround) count the extras too.
    if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    if (Features.test(FeatureSGPRInitBug))
      NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  VGPRBlocks =
      IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs, EnableWavefrontSize32);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
3686
// Parse a .amdhsa_kernel directive block: read the kernel name, then consume
// .amdhsa_* sub-directives until .end_amdhsa_kernel, accumulating fields of
// the kernel descriptor, and finally compute the granulated GPR counts and
// emit the descriptor via the target streamer. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
    return TokError("directive only supported for amdhsa OS");

  StringRef KernelName;
  if (getParser().parseIdentifier(KernelName))
    return true;

  kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI());

  // Tracks which sub-directives have appeared, both to reject repeats and to
  // enforce the required ones after the loop.
  StringSet<> Seen;

  IsaVersion IVersion = getIsaVersion(getSTI().getCPU());

  SMRange VGPRRange;
  uint64_t NextFreeVGPR = 0;
  SMRange SGPRRange;
  uint64_t NextFreeSGPR = 0;
  unsigned UserSGPRCount = 0;
  bool ReserveVCC = true;
  bool ReserveFlatScr = true;
  bool ReserveXNACK = hasXNACK();
  Optional<bool> EnableWavefrontSize32;

  while (true) {
    // Skip statement terminators (lexing a comment also yields one).
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");

    StringRef ID = getTok().getIdentifier();
    SMRange IDRange = getTok().getLocRange();
    Lex();

    if (ID == ".end_amdhsa_kernel")
      break;

    if (Seen.find(ID) != Seen.end())
      return TokError(".amdhsa_ directives cannot be repeated");
    Seen.insert(ID);

    SMLoc ValStart = getTok().getLoc();
    int64_t IVal;
    if (getParser().parseAbsoluteExpression(IVal))
      return true;
    SMLoc ValEnd = getTok().getLoc();
    SMRange ValRange = SMRange(ValStart, ValEnd);

    if (IVal < 0)
      return OutOfRangeError(ValRange);

    uint64_t Val = IVal;

// Range-check VALUE against the descriptor field's bit width, then set it.
#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE)                           \
  if (!isUInt<ENTRY##_WIDTH>(VALUE))                                           \
    return OutOfRangeError(RANGE);                                             \
  AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);

    if (ID == ".amdhsa_group_segment_fixed_size") {
      if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.group_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_private_segment_fixed_size") {
      if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.private_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
                       Val, ValRange);
      if (Val)
        UserSGPRCount += 4;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
                       ValRange);
      if (Val)
        UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
                       ValRange);
      if (Val)
        UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
                       Val, ValRange);
      if (Val)
        UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
                       ValRange);
      if (Val)
        UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
                       ValRange);
      if (Val)
        UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
                       Val, ValRange);
      if (Val)
        UserSGPRCount += 1;
    } else if (ID == ".amdhsa_wavefront_size32") {
      if (IVersion.Major < 10)
        return getParser().Error(IDRange.Start, "directive requires gfx10+",
                                 IDRange);
      EnableWavefrontSize32 = Val;
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                       Val, ValRange);
    } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
          ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
                       ValRange);
    } else if (ID == ".amdhsa_next_free_vgpr") {
      VGPRRange = ValRange;
      NextFreeVGPR = Val;
    } else if (ID == ".amdhsa_next_free_sgpr") {
      SGPRRange = ValRange;
      NextFreeSGPR = Val;
    } else if (ID == ".amdhsa_reserve_vcc") {
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveVCC = Val;
    } else if (ID == ".amdhsa_reserve_flat_scratch") {
      if (IVersion.Major < 7)
        return getParser().Error(IDRange.Start, "directive requires gfx7+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveFlatScr = Val;
    } else if (ID == ".amdhsa_reserve_xnack_mask") {
      if (IVersion.Major < 8)
        return getParser().Error(IDRange.Start, "directive requires gfx8+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveXNACK = Val;
    } else if (ID == ".amdhsa_float_round_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_round_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
                       ValRange);
    } else if (ID == ".amdhsa_dx10_clamp") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
    } else if (ID == ".amdhsa_ieee_mode") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_fp16_overflow") {
      if (IVersion.Major < 9)
        return getParser().Error(IDRange.Start, "directive requires gfx9+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
                       ValRange);
    } else if (ID == ".amdhsa_workgroup_processor_mode") {
      if (IVersion.Major < 10)
        return getParser().Error(IDRange.Start, "directive requires gfx10+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_WGP_MODE, Val,
                       ValRange);
    } else if (ID == ".amdhsa_memory_ordered") {
      if (IVersion.Major < 10)
        return getParser().Error(IDRange.Start, "directive requires gfx10+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_MEM_ORDERED, Val,
                       ValRange);
    } else if (ID == ".amdhsa_forward_progress") {
      if (IVersion.Major < 10)
        return getParser().Error(IDRange.Start, "directive requires gfx10+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FWD_PROGRESS, Val,
                       ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_denorm_src") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_int_div_zero") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
                       Val, ValRange);
    } else {
      return getParser().Error(IDRange.Start,
                               "unknown .amdhsa_kernel directive", IDRange);
    }

#undef PARSE_BITS_ENTRY
  }

  if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
    return TokError(".amdhsa_next_free_vgpr directive is required");

  if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
    return TokError(".amdhsa_next_free_sgpr directive is required");

  unsigned VGPRBlocks;
  unsigned SGPRBlocks;
  if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
                         ReserveXNACK, EnableWavefrontSize32, NextFreeVGPR,
                         VGPRRange, NextFreeSGPR, SGPRRange, VGPRBlocks,
                         SGPRBlocks))
    return true;

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
          VGPRBlocks))
    return OutOfRangeError(VGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
          SGPRBlocks))
    return OutOfRangeError(SGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
                  SGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
    return TokError("too many user SGPRs enabled");
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
                  UserSGPRCount);

  getTargetStreamer().EmitAmdhsaKernelDescriptor(
      getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
      ReserveFlatScr, ReserveXNACK);
  return false;
}
3974
Tom Stellard347ac792015-06-26 21:15:07 +00003975bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00003976 uint32_t Major;
3977 uint32_t Minor;
3978
3979 if (ParseDirectiveMajorMinor(Major, Minor))
3980 return true;
3981
3982 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
3983 return false;
3984}
3985
// Parse the .hsa_code_object_isa directive. With no arguments, emit the ISA
// version of the targeted GPU; otherwise parse
// "<major>, <minor>, <stepping>, "<vendor>", "<arch>"" and emit that.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex(); // Consume the comma.

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex(); // Consume the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex(); // Consume the vendor string.

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex(); // Consume the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex(); // Consume the arch string.

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
4037
Tom Stellardff7416b2015-06-26 21:58:31 +00004038bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
4039 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00004040 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
4041 // assembly for backwards compatibility.
4042 if (ID == "max_scratch_backing_memory_byte_size") {
4043 Parser.eatToEndOfStatement();
4044 return false;
4045 }
4046
Valery Pykhtindc110542016-03-06 20:25:36 +00004047 SmallString<40> ErrStr;
4048 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00004049 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00004050 return TokError(Err.str());
4051 }
Tom Stellardff7416b2015-06-26 21:58:31 +00004052 Lex();
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004053
Stanislav Mekhanoshin5d00c302019-06-17 16:48:56 +00004054 if (ID == "enable_wavefront_size32") {
4055 if (Header.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) {
4056 if (!isGFX10())
4057 return TokError("enable_wavefront_size32=1 is only allowed on GFX10+");
4058 if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
4059 return TokError("enable_wavefront_size32=1 requires +WavefrontSize32");
4060 } else {
4061 if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64])
4062 return TokError("enable_wavefront_size32=0 requires +WavefrontSize64");
4063 }
4064 }
4065
4066 if (ID == "wavefront_size") {
4067 if (Header.wavefront_size == 5) {
4068 if (!isGFX10())
4069 return TokError("wavefront_size=5 is only allowed on GFX10+");
4070 if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
4071 return TokError("wavefront_size=5 requires +WavefrontSize32");
4072 } else if (Header.wavefront_size == 6) {
4073 if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64])
4074 return TokError("wavefront_size=6 requires +WavefrontSize64");
4075 }
4076 }
4077
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004078 if (ID == "enable_wgp_mode") {
4079 if (G_00B848_WGP_MODE(Header.compute_pgm_resource_registers) && !isGFX10())
4080 return TokError("enable_wgp_mode=1 is only allowed on GFX10+");
4081 }
4082
4083 if (ID == "enable_mem_ordered") {
4084 if (G_00B848_MEM_ORDERED(Header.compute_pgm_resource_registers) && !isGFX10())
4085 return TokError("enable_mem_ordered=1 is only allowed on GFX10+");
4086 }
4087
4088 if (ID == "enable_fwd_progress") {
4089 if (G_00B848_FWD_PROGRESS(Header.compute_pgm_resource_registers) && !isGFX10())
4090 return TokError("enable_fwd_progress=1 is only allowed on GFX10+");
4091 }
4092
Tom Stellardff7416b2015-06-26 21:58:31 +00004093 return false;
4094}
4095
4096bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00004097 amd_kernel_code_t Header;
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004098 AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
Tom Stellardff7416b2015-06-26 21:58:31 +00004099
4100 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00004101 // Lex EndOfStatement. This is in a while loop, because lexing a comment
4102 // will set the current token to EndOfStatement.
4103 while(getLexer().is(AsmToken::EndOfStatement))
4104 Lex();
4105
4106 if (getLexer().isNot(AsmToken::Identifier))
4107 return TokError("expected value identifier or .end_amd_kernel_code_t");
4108
4109 StringRef ID = getLexer().getTok().getIdentifier();
4110 Lex();
4111
4112 if (ID == ".end_amd_kernel_code_t")
4113 break;
4114
4115 if (ParseAMDKernelCodeTValue(ID, Header))
4116 return true;
4117 }
4118
4119 getTargetStreamer().EmitAMDKernelCodeT(Header);
4120
4121 return false;
4122}
4123
Tom Stellard1e1b05d2015-11-06 11:45:14 +00004124bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
4125 if (getLexer().isNot(AsmToken::Identifier))
4126 return TokError("expected symbol name");
4127
4128 StringRef KernelName = Parser.getTok().getString();
4129
4130 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
4131 ELF::STT_AMDGPU_HSA_KERNEL);
4132 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00004133 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
4134 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00004135 return false;
4136}
4137
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00004138bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00004139 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
4140 return Error(getParser().getTok().getLoc(),
4141 ".amd_amdgpu_isa directive is not available on non-amdgcn "
4142 "architectures");
4143 }
4144
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00004145 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
4146
4147 std::string ISAVersionStringFromSTI;
4148 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
4149 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
4150
4151 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
4152 return Error(getParser().getTok().getLoc(),
4153 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
4154 "arguments specified through the command line");
4155 }
4156
4157 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
4158 Lex();
4159
4160 return false;
4161}
4162
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00004163bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
Scott Linderf5b36e52018-12-12 19:39:27 +00004164 const char *AssemblerDirectiveBegin;
4165 const char *AssemblerDirectiveEnd;
4166 std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
4167 AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())
4168 ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
4169 HSAMD::V3::AssemblerDirectiveEnd)
4170 : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
4171 HSAMD::AssemblerDirectiveEnd);
4172
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00004173 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
4174 return Error(getParser().getTok().getLoc(),
Scott Linderf5b36e52018-12-12 19:39:27 +00004175 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00004176 "not available on non-amdhsa OSes")).str());
4177 }
4178
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00004179 std::string HSAMetadataString;
Tim Renoufe7bd52f2019-03-20 18:47:21 +00004180 if (ParseToEndDirective(AssemblerDirectiveBegin, AssemblerDirectiveEnd,
4181 HSAMetadataString))
4182 return true;
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00004183
Scott Linderf5b36e52018-12-12 19:39:27 +00004184 if (IsaInfo::hasCodeObjectV3(&getSTI())) {
4185 if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
4186 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
4187 } else {
4188 if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
4189 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
4190 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00004191
4192 return false;
4193}
4194
Tim Renoufe7bd52f2019-03-20 18:47:21 +00004195/// Common code to parse out a block of text (typically YAML) between start and
4196/// end directives.
4197bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin,
4198 const char *AssemblerDirectiveEnd,
4199 std::string &CollectString) {
4200
4201 raw_string_ostream CollectStream(CollectString);
4202
4203 getLexer().setSkipSpace(false);
4204
4205 bool FoundEnd = false;
4206 while (!getLexer().is(AsmToken::Eof)) {
4207 while (getLexer().is(AsmToken::Space)) {
4208 CollectStream << getLexer().getTok().getString();
4209 Lex();
4210 }
4211
4212 if (getLexer().is(AsmToken::Identifier)) {
4213 StringRef ID = getLexer().getTok().getIdentifier();
4214 if (ID == AssemblerDirectiveEnd) {
4215 Lex();
4216 FoundEnd = true;
4217 break;
4218 }
4219 }
4220
4221 CollectStream << Parser.parseStringToEndOfStatement()
4222 << getContext().getAsmInfo()->getSeparatorString();
4223
4224 Parser.eatToEndOfStatement();
4225 }
4226
4227 getLexer().setSkipSpace(true);
4228
4229 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
4230 return TokError(Twine("expected directive ") +
4231 Twine(AssemblerDirectiveEnd) + Twine(" not found"));
4232 }
4233
4234 CollectStream.flush();
4235 return false;
4236}
4237
4238/// Parse the assembler directive for new MsgPack-format PAL metadata.
4239bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() {
4240 std::string String;
4241 if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin,
4242 AMDGPU::PALMD::AssemblerDirectiveEnd, String))
4243 return true;
4244
4245 auto PALMetadata = getTargetStreamer().getPALMetadata();
4246 if (!PALMetadata->setFromString(String))
4247 return Error(getParser().getTok().getLoc(), "invalid PAL metadata");
4248 return false;
4249}
4250
/// Parse the assembler directive for old linear-format PAL metadata.
bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
  // Legacy PAL metadata only applies to the amdpal OS.
  if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
                 "not available on non-amdpal OSes")).str());
  }

  // The legacy format is a flat, comma-separated list of key/value register
  // pairs; mark the metadata object as legacy before filling it in.
  auto PALMetadata = getTargetStreamer().getPALMetadata();
  PALMetadata->setLegacy();
  for (;;) {
    uint32_t Key, Value;
    // Each pair is "key, value", so a comma must follow the key...
    if (ParseAsAbsoluteExpression(Key)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    if (getLexer().isNot(AsmToken::Comma)) {
      return TokError(Twine("expected an even number of values in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    Lex();
    if (ParseAsAbsoluteExpression(Value)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    PALMetadata->setRegister(Key, Value);
    // ...and another comma continues the list; anything else terminates it.
    if (getLexer().isNot(AsmToken::Comma))
      break;
    Lex();
  }
  return false;
}
4283
Nicolai Haehnle08e8cb52019-06-25 11:51:35 +00004284/// ParseDirectiveAMDGPULDS
4285/// ::= .amdgpu_lds identifier ',' size_expression [',' align_expression]
4286bool AMDGPUAsmParser::ParseDirectiveAMDGPULDS() {
4287 if (getParser().checkForValidSection())
4288 return true;
4289
4290 StringRef Name;
4291 SMLoc NameLoc = getLexer().getLoc();
4292 if (getParser().parseIdentifier(Name))
4293 return TokError("expected identifier in directive");
4294
4295 MCSymbol *Symbol = getContext().getOrCreateSymbol(Name);
4296 if (parseToken(AsmToken::Comma, "expected ','"))
4297 return true;
4298
4299 unsigned LocalMemorySize = AMDGPU::IsaInfo::getLocalMemorySize(&getSTI());
4300
4301 int64_t Size;
4302 SMLoc SizeLoc = getLexer().getLoc();
4303 if (getParser().parseAbsoluteExpression(Size))
4304 return true;
4305 if (Size < 0)
4306 return Error(SizeLoc, "size must be non-negative");
4307 if (Size > LocalMemorySize)
4308 return Error(SizeLoc, "size is too large");
4309
4310 int64_t Align = 4;
4311 if (getLexer().is(AsmToken::Comma)) {
4312 Lex();
4313 SMLoc AlignLoc = getLexer().getLoc();
4314 if (getParser().parseAbsoluteExpression(Align))
4315 return true;
4316 if (Align < 0 || !isPowerOf2_64(Align))
4317 return Error(AlignLoc, "alignment must be a power of two");
4318
4319 // Alignment larger than the size of LDS is possible in theory, as long
4320 // as the linker manages to place to symbol at address 0, but we do want
4321 // to make sure the alignment fits nicely into a 32-bit integer.
4322 if (Align >= 1u << 31)
4323 return Error(AlignLoc, "alignment is too large");
4324 }
4325
4326 if (parseToken(AsmToken::EndOfStatement,
4327 "unexpected token in '.amdgpu_lds' directive"))
4328 return true;
4329
4330 Symbol->redefineIfPossible();
4331 if (!Symbol->isUndefined())
4332 return Error(NameLoc, "invalid symbol redefinition");
4333
4334 getTargetStreamer().emitAMDGPULDS(Symbol, Size, Align);
4335 return false;
4336}
4337
Tom Stellard45bb48e2015-06-13 03:28:10 +00004338bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00004339 StringRef IDVal = DirectiveID.getString();
4340
Scott Linder1e8c2c72018-06-21 19:38:56 +00004341 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
4342 if (IDVal == ".amdgcn_target")
4343 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00004344
Scott Linder1e8c2c72018-06-21 19:38:56 +00004345 if (IDVal == ".amdhsa_kernel")
4346 return ParseDirectiveAMDHSAKernel();
Scott Linderf5b36e52018-12-12 19:39:27 +00004347
4348 // TODO: Restructure/combine with PAL metadata directive.
4349 if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
4350 return ParseDirectiveHSAMetadata();
Scott Linder1e8c2c72018-06-21 19:38:56 +00004351 } else {
4352 if (IDVal == ".hsa_code_object_version")
4353 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00004354
Scott Linder1e8c2c72018-06-21 19:38:56 +00004355 if (IDVal == ".hsa_code_object_isa")
4356 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00004357
Scott Linder1e8c2c72018-06-21 19:38:56 +00004358 if (IDVal == ".amd_kernel_code_t")
4359 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00004360
Scott Linder1e8c2c72018-06-21 19:38:56 +00004361 if (IDVal == ".amdgpu_hsa_kernel")
4362 return ParseDirectiveAMDGPUHsaKernel();
4363
4364 if (IDVal == ".amd_amdgpu_isa")
4365 return ParseDirectiveISAVersion();
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00004366
Scott Linderf5b36e52018-12-12 19:39:27 +00004367 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
4368 return ParseDirectiveHSAMetadata();
4369 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00004370
Nicolai Haehnle08e8cb52019-06-25 11:51:35 +00004371 if (IDVal == ".amdgpu_lds")
4372 return ParseDirectiveAMDGPULDS();
4373
Tim Renoufe7bd52f2019-03-20 18:47:21 +00004374 if (IDVal == PALMD::AssemblerDirectiveBegin)
4375 return ParseDirectivePALMetadataBegin();
4376
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00004377 if (IDVal == PALMD::AssemblerDirective)
4378 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00004379
Tom Stellard45bb48e2015-06-13 03:28:10 +00004380 return true;
4381}
4382
Matt Arsenault68802d32015-11-05 03:11:27 +00004383bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
4384 unsigned RegNo) const {
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00004385
4386 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
4387 R.isValid(); ++R) {
4388 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004389 return isGFX9() || isGFX10();
4390 }
4391
4392 // GFX10 has 2 more SGPRs 104 and 105.
4393 for (MCRegAliasIterator R(AMDGPU::SGPR104_SGPR105, &MRI, true);
4394 R.isValid(); ++R) {
4395 if (*R == RegNo)
4396 return hasSGPR104_SGPR105();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00004397 }
4398
4399 switch (RegNo) {
Dmitry Preobrazhensky9111f352019-06-03 13:51:24 +00004400 case AMDGPU::SRC_SHARED_BASE:
4401 case AMDGPU::SRC_SHARED_LIMIT:
4402 case AMDGPU::SRC_PRIVATE_BASE:
4403 case AMDGPU::SRC_PRIVATE_LIMIT:
4404 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
4405 return !isCI() && !isSI() && !isVI();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00004406 case AMDGPU::TBA:
4407 case AMDGPU::TBA_LO:
4408 case AMDGPU::TBA_HI:
4409 case AMDGPU::TMA:
4410 case AMDGPU::TMA_LO:
4411 case AMDGPU::TMA_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004412 return !isGFX9() && !isGFX10();
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00004413 case AMDGPU::XNACK_MASK:
4414 case AMDGPU::XNACK_MASK_LO:
4415 case AMDGPU::XNACK_MASK_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004416 return !isCI() && !isSI() && !isGFX10() && hasXNACK();
4417 case AMDGPU::SGPR_NULL:
4418 return isGFX10();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00004419 default:
4420 break;
4421 }
4422
Matt Arsenault3b159672015-12-01 20:31:08 +00004423 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00004424 return true;
4425
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004426 if (isSI() || isGFX10()) {
4427 // No flat_scr on SI.
4428 // On GFX10 flat scratch is not a valid register operand and can only be
4429 // accessed with s_setreg/s_getreg.
Matt Arsenault3b159672015-12-01 20:31:08 +00004430 switch (RegNo) {
4431 case AMDGPU::FLAT_SCR:
4432 case AMDGPU::FLAT_SCR_LO:
4433 case AMDGPU::FLAT_SCR_HI:
4434 return false;
4435 default:
4436 return true;
4437 }
4438 }
4439
Matt Arsenault68802d32015-11-05 03:11:27 +00004440 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
4441 // SI/CI have.
4442 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
4443 R.isValid(); ++R) {
4444 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004445 return hasSGPR102_SGPR103();
Matt Arsenault68802d32015-11-05 03:11:27 +00004446 }
4447
4448 return true;
4449}
4450
Alex Bradbury58eba092016-11-01 16:32:05 +00004451OperandMatchResultTy
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004452AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic,
4453 OperandMode Mode) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004454 // Try to parse with a custom parser
4455 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4456
4457 // If we successfully parsed the operand or if there as an error parsing,
4458 // we are done.
4459 //
4460 // If we are parsing after we reach EndOfStatement then this means we
4461 // are appending default values to the Operands list. This is only done
4462 // by custom parser, so we shouldn't continue on to the generic parsing.
Sam Kolton1bdcef72016-05-23 09:59:02 +00004463 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00004464 getLexer().is(AsmToken::EndOfStatement))
4465 return ResTy;
4466
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004467 if (Mode == OperandMode_NSA && getLexer().is(AsmToken::LBrac)) {
4468 unsigned Prefix = Operands.size();
4469 SMLoc LBraceLoc = getTok().getLoc();
4470 Parser.Lex(); // eat the '['
4471
4472 for (;;) {
4473 ResTy = parseReg(Operands);
4474 if (ResTy != MatchOperand_Success)
4475 return ResTy;
4476
4477 if (getLexer().is(AsmToken::RBrac))
4478 break;
4479
4480 if (getLexer().isNot(AsmToken::Comma))
4481 return MatchOperand_ParseFail;
4482 Parser.Lex();
4483 }
4484
4485 if (Operands.size() - Prefix > 1) {
4486 Operands.insert(Operands.begin() + Prefix,
4487 AMDGPUOperand::CreateToken(this, "[", LBraceLoc));
4488 Operands.push_back(AMDGPUOperand::CreateToken(this, "]",
4489 getTok().getLoc()));
4490 }
4491
4492 Parser.Lex(); // eat the ']'
4493 return MatchOperand_Success;
4494 }
4495
Dmitry Preobrazhensky43fcc792019-05-17 13:17:48 +00004496 return parseRegOrImm(Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004497}
4498
Sam Kolton05ef1c92016-06-03 10:27:37 +00004499StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
4500 // Clear any forced encodings from the previous instruction.
4501 setForcedEncodingSize(0);
4502 setForcedDPP(false);
4503 setForcedSDWA(false);
4504
4505 if (Name.endswith("_e64")) {
4506 setForcedEncodingSize(64);
4507 return Name.substr(0, Name.size() - 4);
4508 } else if (Name.endswith("_e32")) {
4509 setForcedEncodingSize(32);
4510 return Name.substr(0, Name.size() - 4);
4511 } else if (Name.endswith("_dpp")) {
4512 setForcedDPP(true);
4513 return Name.substr(0, Name.size() - 4);
4514 } else if (Name.endswith("_sdwa")) {
4515 setForcedSDWA(true);
4516 return Name.substr(0, Name.size() - 5);
4517 }
4518 return Name;
4519}
4520
Tom Stellard45bb48e2015-06-13 03:28:10 +00004521bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
4522 StringRef Name,
4523 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004524 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00004525 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004526 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00004527
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004528 bool IsMIMG = Name.startswith("image_");
4529
Tom Stellard45bb48e2015-06-13 03:28:10 +00004530 while (!getLexer().is(AsmToken::EndOfStatement)) {
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004531 OperandMode Mode = OperandMode_Default;
4532 if (IsMIMG && isGFX10() && Operands.size() == 2)
4533 Mode = OperandMode_NSA;
4534 OperandMatchResultTy Res = parseOperand(Operands, Name, Mode);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004535
4536 // Eat the comma or space if there is one.
4537 if (getLexer().is(AsmToken::Comma))
4538 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00004539
Tom Stellard45bb48e2015-06-13 03:28:10 +00004540 switch (Res) {
4541 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004542 case MatchOperand_ParseFail:
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004543 // FIXME: use real operand location rather than the current location.
Sam Kolton1bdcef72016-05-23 09:59:02 +00004544 Error(getLexer().getLoc(), "failed parsing operand.");
4545 while (!getLexer().is(AsmToken::EndOfStatement)) {
4546 Parser.Lex();
4547 }
4548 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004549 case MatchOperand_NoMatch:
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004550 // FIXME: use real operand location rather than the current location.
Sam Kolton1bdcef72016-05-23 09:59:02 +00004551 Error(getLexer().getLoc(), "not a valid operand.");
4552 while (!getLexer().is(AsmToken::EndOfStatement)) {
4553 Parser.Lex();
4554 }
4555 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004556 }
4557 }
4558
Tom Stellard45bb48e2015-06-13 03:28:10 +00004559 return false;
4560}
4561
4562//===----------------------------------------------------------------------===//
4563// Utility functions
4564//===----------------------------------------------------------------------===//
4565
Alex Bradbury58eba092016-11-01 16:32:05 +00004566OperandMatchResultTy
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004567AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &IntVal) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004568
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004569 if (!trySkipId(Prefix, AsmToken::Colon))
4570 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004571
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004572 return parseExpr(IntVal) ? MatchOperand_Success : MatchOperand_ParseFail;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004573}
4574
Alex Bradbury58eba092016-11-01 16:32:05 +00004575OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004576AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004577 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004578 bool (*ConvertResult)(int64_t&)) {
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004579 SMLoc S = getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004580 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004581
Alex Bradbury58eba092016-11-01 16:32:05 +00004582 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004583 if (Res != MatchOperand_Success)
4584 return Res;
4585
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004586 if (ConvertResult && !ConvertResult(Value)) {
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00004587 Error(S, "invalid " + StringRef(Prefix) + " value.");
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004588 }
4589
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004590 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004591 return MatchOperand_Success;
4592}
4593
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004594OperandMatchResultTy
4595AMDGPUAsmParser::parseOperandArrayWithPrefix(const char *Prefix,
4596 OperandVector &Operands,
4597 AMDGPUOperand::ImmTy ImmTy,
4598 bool (*ConvertResult)(int64_t&)) {
4599 SMLoc S = getLoc();
4600 if (!trySkipId(Prefix, AsmToken::Colon))
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004601 return MatchOperand_NoMatch;
4602
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004603 if (!skipToken(AsmToken::LBrac, "expected a left square bracket"))
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004604 return MatchOperand_ParseFail;
4605
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004606 unsigned Val = 0;
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004607 const unsigned MaxSize = 4;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004608
4609 // FIXME: How to verify the number of elements matches the number of src
4610 // operands?
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004611 for (int I = 0; ; ++I) {
4612 int64_t Op;
4613 SMLoc Loc = getLoc();
4614 if (!parseExpr(Op))
4615 return MatchOperand_ParseFail;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004616
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004617 if (Op != 0 && Op != 1) {
4618 Error(Loc, "invalid " + StringRef(Prefix) + " value.");
4619 return MatchOperand_ParseFail;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004620 }
4621
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004622 Val |= (Op << I);
Dmitry Preobrazhensky7773fc42019-05-22 13:59:01 +00004623
4624 if (trySkipToken(AsmToken::RBrac))
4625 break;
4626
4627 if (I + 1 == MaxSize) {
4628 Error(getLoc(), "expected a closing square bracket");
4629 return MatchOperand_ParseFail;
4630 }
4631
4632 if (!skipToken(AsmToken::Comma, "expected a comma"))
4633 return MatchOperand_ParseFail;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004634 }
4635
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004636 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
4637 return MatchOperand_Success;
4638}
4639
Alex Bradbury58eba092016-11-01 16:32:05 +00004640OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004641AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004642 AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00004643 int64_t Bit = 0;
4644 SMLoc S = Parser.getTok().getLoc();
4645
4646 // We are at the end of the statement, and this is a default argument, so
4647 // use a default value.
4648 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4649 switch(getLexer().getKind()) {
4650 case AsmToken::Identifier: {
4651 StringRef Tok = Parser.getTok().getString();
4652 if (Tok == Name) {
Ryan Taylor1f334d02018-08-28 15:07:30 +00004653 if (Tok == "r128" && isGFX9())
4654 Error(S, "r128 modifier is not supported on this GPU");
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00004655 if (Tok == "a16" && !isGFX9() && !isGFX10())
Ryan Taylor1f334d02018-08-28 15:07:30 +00004656 Error(S, "a16 modifier is not supported on this GPU");
Tom Stellard45bb48e2015-06-13 03:28:10 +00004657 Bit = 1;
4658 Parser.Lex();
4659 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
4660 Bit = 0;
4661 Parser.Lex();
4662 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00004663 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004664 }
4665 break;
4666 }
4667 default:
4668 return MatchOperand_NoMatch;
4669 }
4670 }
4671
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00004672 if (!isGFX10() && ImmTy == AMDGPUOperand::ImmTyDLC)
4673 return MatchOperand_ParseFail;
4674
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004675 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004676 return MatchOperand_Success;
4677}
4678
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004679static void addOptionalImmOperand(
4680 MCInst& Inst, const OperandVector& Operands,
4681 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
4682 AMDGPUOperand::ImmTy ImmT,
4683 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004684 auto i = OptionalIdx.find(ImmT);
4685 if (i != OptionalIdx.end()) {
4686 unsigned Idx = i->second;
4687 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
4688 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00004689 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004690 }
4691}
4692
Alex Bradbury58eba092016-11-01 16:32:05 +00004693OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00004694AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00004695 if (getLexer().isNot(AsmToken::Identifier)) {
4696 return MatchOperand_NoMatch;
4697 }
4698 StringRef Tok = Parser.getTok().getString();
4699 if (Tok != Prefix) {
4700 return MatchOperand_NoMatch;
4701 }
4702
4703 Parser.Lex();
4704 if (getLexer().isNot(AsmToken::Colon)) {
4705 return MatchOperand_ParseFail;
4706 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00004707
Sam Kolton3025e7f2016-04-26 13:33:56 +00004708 Parser.Lex();
4709 if (getLexer().isNot(AsmToken::Identifier)) {
4710 return MatchOperand_ParseFail;
4711 }
4712
4713 Value = Parser.getTok().getString();
4714 return MatchOperand_Success;
4715}
4716
Tim Renouf35484c92018-08-21 11:06:05 +00004717// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
4718// values to live in a joint format operand in the MCInst encoding.
4719OperandMatchResultTy
4720AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
4721 SMLoc S = Parser.getTok().getLoc();
4722 int64_t Dfmt = 0, Nfmt = 0;
4723 // dfmt and nfmt can appear in either order, and each is optional.
4724 bool GotDfmt = false, GotNfmt = false;
4725 while (!GotDfmt || !GotNfmt) {
4726 if (!GotDfmt) {
4727 auto Res = parseIntWithPrefix("dfmt", Dfmt);
4728 if (Res != MatchOperand_NoMatch) {
4729 if (Res != MatchOperand_Success)
4730 return Res;
4731 if (Dfmt >= 16) {
4732 Error(Parser.getTok().getLoc(), "out of range dfmt");
4733 return MatchOperand_ParseFail;
4734 }
4735 GotDfmt = true;
4736 Parser.Lex();
4737 continue;
4738 }
4739 }
4740 if (!GotNfmt) {
4741 auto Res = parseIntWithPrefix("nfmt", Nfmt);
4742 if (Res != MatchOperand_NoMatch) {
4743 if (Res != MatchOperand_Success)
4744 return Res;
4745 if (Nfmt >= 8) {
4746 Error(Parser.getTok().getLoc(), "out of range nfmt");
4747 return MatchOperand_ParseFail;
4748 }
4749 GotNfmt = true;
4750 Parser.Lex();
4751 continue;
4752 }
4753 }
4754 break;
4755 }
4756 if (!GotDfmt && !GotNfmt)
4757 return MatchOperand_NoMatch;
4758 auto Format = Dfmt | Nfmt << 4;
4759 Operands.push_back(
4760 AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
4761 return MatchOperand_Success;
4762}
4763
Tom Stellard45bb48e2015-06-13 03:28:10 +00004764//===----------------------------------------------------------------------===//
4765// ds
4766//===----------------------------------------------------------------------===//
4767
Tom Stellard45bb48e2015-06-13 03:28:10 +00004768void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
4769 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004770 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004771
4772 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4773 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4774
4775 // Add the register arguments
4776 if (Op.isReg()) {
4777 Op.addRegOperands(Inst, 1);
4778 continue;
4779 }
4780
4781 // Handle optional arguments
4782 OptionalIdx[Op.getImmTy()] = i;
4783 }
4784
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004785 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
4786 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004787 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004788
Tom Stellard45bb48e2015-06-13 03:28:10 +00004789 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
4790}
4791
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004792void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
4793 bool IsGdsHardcoded) {
4794 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004795
4796 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4797 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4798
4799 // Add the register arguments
4800 if (Op.isReg()) {
4801 Op.addRegOperands(Inst, 1);
4802 continue;
4803 }
4804
4805 if (Op.isToken() && Op.getToken() == "gds") {
Artem Tamazov43b61562017-02-03 12:47:30 +00004806 IsGdsHardcoded = true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004807 continue;
4808 }
4809
4810 // Handle optional arguments
4811 OptionalIdx[Op.getImmTy()] = i;
4812 }
4813
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004814 AMDGPUOperand::ImmTy OffsetType =
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00004815 (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx10 ||
4816 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx6_gfx7 ||
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004817 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
4818 AMDGPUOperand::ImmTyOffset;
4819
4820 addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);
4821
Artem Tamazov43b61562017-02-03 12:47:30 +00004822 if (!IsGdsHardcoded) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004823 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004824 }
4825 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
4826}
4827
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004828void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
4829 OptionalImmIndexMap OptionalIdx;
4830
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004831 unsigned OperandIdx[4];
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004832 unsigned EnMask = 0;
4833 int SrcIdx = 0;
4834
4835 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4836 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4837
4838 // Add the register arguments
4839 if (Op.isReg()) {
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004840 assert(SrcIdx < 4);
4841 OperandIdx[SrcIdx] = Inst.size();
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004842 Op.addRegOperands(Inst, 1);
4843 ++SrcIdx;
4844 continue;
4845 }
4846
4847 if (Op.isOff()) {
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004848 assert(SrcIdx < 4);
4849 OperandIdx[SrcIdx] = Inst.size();
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004850 Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004851 ++SrcIdx;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004852 continue;
4853 }
4854
4855 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
4856 Op.addImmOperands(Inst, 1);
4857 continue;
4858 }
4859
4860 if (Op.isToken() && Op.getToken() == "done")
4861 continue;
4862
4863 // Handle optional arguments
4864 OptionalIdx[Op.getImmTy()] = i;
4865 }
4866
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00004867 assert(SrcIdx == 4);
4868
4869 bool Compr = false;
4870 if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
4871 Compr = true;
4872 Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
4873 Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
4874 Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
4875 }
4876
4877 for (auto i = 0; i < SrcIdx; ++i) {
4878 if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
4879 EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
4880 }
4881 }
4882
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004883 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
4884 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);
4885
4886 Inst.addOperand(MCOperand::createImm(EnMask));
4887}
Tom Stellard45bb48e2015-06-13 03:28:10 +00004888
4889//===----------------------------------------------------------------------===//
4890// s_waitcnt
4891//===----------------------------------------------------------------------===//
4892
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004893static bool
4894encodeCnt(
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004895 const AMDGPU::IsaVersion ISA,
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004896 int64_t &IntVal,
4897 int64_t CntVal,
4898 bool Saturate,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004899 unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
4900 unsigned (*decode)(const IsaVersion &Version, unsigned))
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004901{
4902 bool Failed = false;
4903
4904 IntVal = encode(ISA, IntVal, CntVal);
4905 if (CntVal != decode(ISA, IntVal)) {
4906 if (Saturate) {
4907 IntVal = encode(ISA, IntVal, -1);
4908 } else {
4909 Failed = true;
4910 }
4911 }
4912 return Failed;
4913}
4914
Tom Stellard45bb48e2015-06-13 03:28:10 +00004915bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004916
4917 SMLoc CntLoc = getLoc();
4918 StringRef CntName = getTokenStr();
4919
4920 if (!skipToken(AsmToken::Identifier, "expected a counter name") ||
4921 !skipToken(AsmToken::LParen, "expected a left parenthesis"))
4922 return false;
4923
Tom Stellard45bb48e2015-06-13 03:28:10 +00004924 int64_t CntVal;
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004925 SMLoc ValLoc = getLoc();
4926 if (!parseExpr(CntVal))
4927 return false;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004928
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004929 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Tom Stellard45bb48e2015-06-13 03:28:10 +00004930
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004931 bool Failed = true;
4932 bool Sat = CntName.endswith("_sat");
4933
4934 if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
4935 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
4936 } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
4937 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
4938 } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
4939 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004940 } else {
4941 Error(CntLoc, "invalid counter name " + CntName);
4942 return false;
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004943 }
4944
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00004945 if (Failed) {
4946 Error(ValLoc, "too large value for " + CntName);
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004947 return false;
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00004948 }
4949
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004950 if (!skipToken(AsmToken::RParen, "expected a closing parenthesis"))
4951 return false;
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00004952
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004953 if (trySkipToken(AsmToken::Amp) || trySkipToken(AsmToken::Comma)) {
4954 if (isToken(AsmToken::EndOfStatement)) {
4955 Error(getLoc(), "expected a counter name");
4956 return false;
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004957 }
4958 }
4959
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004960 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004961}
4962
Alex Bradbury58eba092016-11-01 16:32:05 +00004963OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004964AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004965 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00004966 int64_t Waitcnt = getWaitcntBitMask(ISA);
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004967 SMLoc S = getLoc();
Tom Stellard45bb48e2015-06-13 03:28:10 +00004968
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004969 // If parse failed, do not return error code
4970 // to avoid excessive error messages.
4971 if (isToken(AsmToken::Identifier) && peekToken().is(AsmToken::LParen)) {
4972 while (parseCnt(Waitcnt) && !isToken(AsmToken::EndOfStatement));
4973 } else {
4974 parseExpr(Waitcnt);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004975 }
Dmitry Preobrazhenskyb79af792019-05-27 14:08:43 +00004976
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004977 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004978 return MatchOperand_Success;
4979}
4980
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00004981bool
4982AMDGPUOperand::isSWaitCnt() const {
4983 return isImm();
4984}
4985
4986//===----------------------------------------------------------------------===//
4987// hwreg
4988//===----------------------------------------------------------------------===//
4989
/// Parse the interior of "hwreg(reg [, offset, width])" up to and
/// including the closing parenthesis. Offset and Width keep their
/// caller-provided defaults when the optional pair is omitted.
bool
AMDGPUAsmParser::parseHwregBody(OperandInfoTy &HwReg,
                                int64_t &Offset,
                                int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  // The register may be specified by name or using a numeric code
  if (isToken(AsmToken::Identifier) &&
      (HwReg.Id = getHwregId(getTokenStr())) >= 0) {
    HwReg.IsSymbolic = true;
    lex(); // skip register name
  } else if (!parseExpr(HwReg.Id)) {
    return false;
  }

  if (trySkipToken(AsmToken::RParen))
    return true;

  // parse optional params
  return
    skipToken(AsmToken::Comma, "expected a comma or a closing parenthesis") &&
    parseExpr(Offset) &&
    skipToken(AsmToken::Comma, "expected a comma") &&
    parseExpr(Width) &&
    skipToken(AsmToken::RParen, "expected a closing parenthesis");
}
5016
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005017bool
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005018AMDGPUAsmParser::validateHwreg(const OperandInfoTy &HwReg,
5019 const int64_t Offset,
5020 const int64_t Width,
5021 const SMLoc Loc) {
5022
Artem Tamazov6edc1352016-05-26 17:00:33 +00005023 using namespace llvm::AMDGPU::Hwreg;
5024
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005025 if (HwReg.IsSymbolic && !isValidHwreg(HwReg.Id, getSTI())) {
5026 Error(Loc, "specified hardware register is not supported on this GPU");
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005027 return false;
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005028 } else if (!isValidHwreg(HwReg.Id)) {
5029 Error(Loc, "invalid code of hardware register: only 6-bit values are legal");
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005030 return false;
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005031 } else if (!isValidHwregOffset(Offset)) {
5032 Error(Loc, "invalid bit offset: only 5-bit values are legal");
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005033 return false;
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005034 } else if (!isValidHwregWidth(Width)) {
5035 Error(Loc, "invalid bitfield width: only values from 1 to 32 are legal");
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005036 return false;
Artem Tamazovd6468662016-04-25 14:13:51 +00005037 }
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005038 return true;
Artem Tamazovd6468662016-04-25 14:13:51 +00005039}
5040
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005041OperandMatchResultTy
5042AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
5043 using namespace llvm::AMDGPU::Hwreg;
5044
5045 int64_t ImmVal = 0;
5046 SMLoc Loc = getLoc();
5047
5048 // If parse failed, do not return error code
5049 // to avoid excessive error messages.
5050 if (trySkipId("hwreg", AsmToken::LParen)) {
5051 OperandInfoTy HwReg(ID_UNKNOWN_);
5052 int64_t Offset = OFFSET_DEFAULT_;
5053 int64_t Width = WIDTH_DEFAULT_;
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00005054 if (parseHwregBody(HwReg, Offset, Width) &&
5055 validateHwreg(HwReg, Offset, Width, Loc)) {
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005056 ImmVal = encodeHwreg(HwReg.Id, Offset, Width);
5057 }
5058 } else if (parseExpr(ImmVal)) {
5059 if (ImmVal < 0 || !isUInt<16>(ImmVal))
5060 Error(Loc, "invalid immediate: only 16-bit values are legal");
5061 }
5062
5063 Operands.push_back(AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTyHwreg));
5064 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00005065}
5066
Artem Tamazovd6468662016-04-25 14:13:51 +00005067bool AMDGPUOperand::isHwreg() const {
5068 return isImmTy(ImmTyHwreg);
5069}
5070
Dmitry Preobrazhensky1fca3b12019-06-13 12:46:37 +00005071//===----------------------------------------------------------------------===//
5072// sendmsg
5073//===----------------------------------------------------------------------===//
5074
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005075bool
5076AMDGPUAsmParser::parseSendMsgBody(OperandInfoTy &Msg,
5077 OperandInfoTy &Op,
5078 OperandInfoTy &Stream) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005079 using namespace llvm::AMDGPU::SendMsg;
5080
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005081 if (isToken(AsmToken::Identifier) && (Msg.Id = getMsgId(getTokenStr())) >= 0) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005082 Msg.IsSymbolic = true;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005083 lex(); // skip message name
5084 } else if (!parseExpr(Msg.Id)) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005085 return false;
5086 }
5087
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005088 if (trySkipToken(AsmToken::Comma)) {
5089 Op.IsDefined = true;
5090 if (isToken(AsmToken::Identifier) &&
5091 (Op.Id = getMsgOpId(Msg.Id, getTokenStr())) >= 0) {
5092 lex(); // skip operation name
5093 } else if (!parseExpr(Op.Id)) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005094 return false;
5095 }
5096
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005097 if (trySkipToken(AsmToken::Comma)) {
5098 Stream.IsDefined = true;
5099 if (!parseExpr(Stream.Id))
5100 return false;
5101 }
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005102 }
5103
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005104 return skipToken(AsmToken::RParen, "expected a closing parenthesis");
Artem Tamazovebe71ce2016-05-06 17:48:48 +00005105}
5106
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005107bool
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005108AMDGPUAsmParser::validateSendMsg(const OperandInfoTy &Msg,
5109 const OperandInfoTy &Op,
5110 const OperandInfoTy &Stream,
5111 const SMLoc S) {
5112 using namespace llvm::AMDGPU::SendMsg;
5113
5114 // Validation strictness depends on whether message is specified
5115 // in a symbolc or in a numeric form. In the latter case
5116 // only encoding possibility is checked.
5117 bool Strict = Msg.IsSymbolic;
5118
5119 if (!isValidMsgId(Msg.Id, getSTI(), Strict)) {
5120 Error(S, "invalid message id");
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005121 return false;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005122 } else if (Strict && (msgRequiresOp(Msg.Id) != Op.IsDefined)) {
5123 Error(S, Op.IsDefined ?
5124 "message does not support operations" :
5125 "missing message operation");
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005126 return false;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005127 } else if (!isValidMsgOp(Msg.Id, Op.Id, Strict)) {
5128 Error(S, "invalid operation id");
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005129 return false;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005130 } else if (Strict && !msgSupportsStream(Msg.Id, Op.Id) && Stream.IsDefined) {
5131 Error(S, "message operation does not support streams");
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005132 return false;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005133 } else if (!isValidMsgStream(Msg.Id, Op.Id, Stream.Id, Strict)) {
5134 Error(S, "invalid message stream id");
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005135 return false;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005136 }
Dmitry Preobrazhenskyd12966c2019-06-28 15:22:47 +00005137 return true;
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005138}
5139
/// Parse a "sendmsg(...)" operand or a raw 16-bit immediate for
/// s_sendmsg and push the encoded value.
///
/// Always returns MatchOperand_Success: on failure diagnostics have
/// already been emitted and an error result would only add noise.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t ImmVal = 0;
  SMLoc Loc = getLoc();

  // If parse failed, do not return error code
  // to avoid excessive error messages.
  if (trySkipId("sendmsg", AsmToken::LParen)) {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Op(OP_NONE_);
    OperandInfoTy Stream(STREAM_ID_NONE_);
    if (parseSendMsgBody(Msg, Op, Stream) &&
        validateSendMsg(Msg, Op, Stream, Loc)) {
      ImmVal = encodeMsg(Msg.Id, Op.Id, Stream.Id);
    }
  } else if (parseExpr(ImmVal)) {
    if (ImmVal < 0 || !isUInt<16>(ImmVal))
      Error(Loc, "invalid immediate: only 16-bit values are legal");
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
5165
5166bool AMDGPUOperand::isSendMsg() const {
5167 return isImmTy(ImmTySendMsg);
5168}
5169
5170//===----------------------------------------------------------------------===//
5171// v_interp
5172//===----------------------------------------------------------------------===//
5173
Matt Arsenault0e8a2992016-12-15 20:40:20 +00005174OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
5175 if (getLexer().getKind() != AsmToken::Identifier)
5176 return MatchOperand_NoMatch;
5177
5178 StringRef Str = Parser.getTok().getString();
5179 int Slot = StringSwitch<int>(Str)
5180 .Case("p10", 0)
5181 .Case("p20", 1)
5182 .Case("p0", 2)
5183 .Default(-1);
5184
5185 SMLoc S = Parser.getTok().getLoc();
5186 if (Slot == -1)
5187 return MatchOperand_ParseFail;
5188
5189 Parser.Lex();
5190 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
5191 AMDGPUOperand::ImmTyInterpSlot));
5192 return MatchOperand_Success;
5193}
5194
/// Parse a v_interp attribute operand of the form "attrN.c" where N is
/// the attribute number (0-63) and c is the channel (x, y, z or w).
/// Pushes two immediates: the attribute number and the channel.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The last two characters select the channel.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the ".c" suffix and the "attr" prefix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
5234
Dmitry Preobrazhensky1d572ce2019-06-28 14:14:02 +00005235//===----------------------------------------------------------------------===//
5236// exp
5237//===----------------------------------------------------------------------===//
5238
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005239void AMDGPUAsmParser::errorExpTgt() {
5240 Error(Parser.getTok().getLoc(), "invalid exp target");
5241}
5242
/// Decode an exp target name (Str) into its hardware target code (Val).
///
/// Recognized forms: "null" (9), "mrt0".."mrt7" (0-7), "mrtz" (8),
/// "pos0".."pos3" plus "pos4" on GFX10 (12+), "prim" on GFX10 (20),
/// "param0".."param31" (32+) and "invalid_target_N". Out-of-range
/// values emit a diagnostic via errorExpTgt() but still return
/// MatchOperand_Success so parsing can continue.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    // pos4 exists only on GFX10.
    if (Val > 4 || (Val == 4 && !isGFX10()))
      errorExpTgt();

    // Position targets start at code 12.
    Val += 12;
    return MatchOperand_Success;
  }

  if (isGFX10() && Str == "prim") {
    Val = 20;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    // Parameter targets start at code 32.
    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    // Keep the raw numeric value after diagnosing it as invalid.
    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
5306
5307OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
5308 uint8_t Val;
5309 StringRef Str = Parser.getTok().getString();
5310
5311 auto Res = parseExpTgtImpl(Str, Val);
5312 if (Res != MatchOperand_Success)
5313 return Res;
5314
5315 SMLoc S = Parser.getTok().getLoc();
5316 Parser.Lex();
5317
5318 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
5319 AMDGPUOperand::ImmTyExpTgt));
5320 return MatchOperand_Success;
5321}
5322
Tom Stellard45bb48e2015-06-13 03:28:10 +00005323//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005324// parser helpers
5325//===----------------------------------------------------------------------===//
5326
5327bool
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005328AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const {
5329 return Token.is(AsmToken::Identifier) && Token.getString() == Id;
5330}
5331
5332bool
5333AMDGPUAsmParser::isId(const StringRef Id) const {
5334 return isId(getToken(), Id);
5335}
5336
5337bool
5338AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const {
5339 return getTokenKind() == Kind;
5340}
5341
5342bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005343AMDGPUAsmParser::trySkipId(const StringRef Id) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005344 if (isId(Id)) {
5345 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005346 return true;
5347 }
5348 return false;
5349}
5350
5351bool
Dmitry Preobrazhensky198611b2019-05-17 16:04:17 +00005352AMDGPUAsmParser::trySkipId(const StringRef Id, const AsmToken::TokenKind Kind) {
5353 if (isId(Id) && peekToken().is(Kind)) {
5354 lex();
5355 lex();
5356 return true;
5357 }
5358 return false;
5359}
5360
5361bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005362AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005363 if (isToken(Kind)) {
5364 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005365 return true;
5366 }
5367 return false;
5368}
5369
5370bool
5371AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
5372 const StringRef ErrMsg) {
5373 if (!trySkipToken(Kind)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005374 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005375 return false;
5376 }
5377 return true;
5378}
5379
5380bool
5381AMDGPUAsmParser::parseExpr(int64_t &Imm) {
5382 return !getParser().parseAbsoluteExpression(Imm);
5383}
5384
5385bool
Dmitry Preobrazhensky4ccb7f82019-07-19 13:12:47 +00005386AMDGPUAsmParser::parseExpr(OperandVector &Operands) {
5387 SMLoc S = getLoc();
5388
5389 const MCExpr *Expr;
5390 if (Parser.parseExpression(Expr))
5391 return false;
5392
5393 int64_t IntVal;
5394 if (Expr->evaluateAsAbsolute(IntVal)) {
5395 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
5396 } else {
5397 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
5398 }
5399 return true;
5400}
5401
5402bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005403AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005404 if (isToken(AsmToken::String)) {
5405 Val = getToken().getStringContents();
5406 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005407 return true;
5408 } else {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005409 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005410 return false;
5411 }
5412}
5413
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005414AsmToken
5415AMDGPUAsmParser::getToken() const {
5416 return Parser.getTok();
5417}
5418
5419AsmToken
5420AMDGPUAsmParser::peekToken() {
5421 return getLexer().peekTok();
5422}
5423
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00005424void
5425AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) {
5426 auto TokCount = getLexer().peekTokens(Tokens);
5427
5428 for (auto Idx = TokCount; Idx < Tokens.size(); ++Idx)
5429 Tokens[Idx] = AsmToken(AsmToken::Error, "");
5430}
5431
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005432AsmToken::TokenKind
5433AMDGPUAsmParser::getTokenKind() const {
5434 return getLexer().getKind();
5435}
5436
5437SMLoc
5438AMDGPUAsmParser::getLoc() const {
5439 return getToken().getLoc();
5440}
5441
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00005442StringRef
5443AMDGPUAsmParser::getTokenStr() const {
5444 return getToken().getString();
5445}
5446
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00005447void
5448AMDGPUAsmParser::lex() {
5449 Parser.Lex();
5450}
5451
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005452//===----------------------------------------------------------------------===//
5453// swizzle
5454//===----------------------------------------------------------------------===//
5455
5456LLVM_READNONE
5457static unsigned
5458encodeBitmaskPerm(const unsigned AndMask,
5459 const unsigned OrMask,
5460 const unsigned XorMask) {
5461 using namespace llvm::AMDGPU::Swizzle;
5462
5463 return BITMASK_PERM_ENC |
5464 (AndMask << BITMASK_AND_SHIFT) |
5465 (OrMask << BITMASK_OR_SHIFT) |
5466 (XorMask << BITMASK_XOR_SHIFT);
5467}
5468
5469bool
5470AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
5471 const unsigned MinVal,
5472 const unsigned MaxVal,
5473 const StringRef ErrMsg) {
5474 for (unsigned i = 0; i < OpNum; ++i) {
5475 if (!skipToken(AsmToken::Comma, "expected a comma")){
5476 return false;
5477 }
5478 SMLoc ExprLoc = Parser.getTok().getLoc();
5479 if (!parseExpr(Op[i])) {
5480 return false;
5481 }
5482 if (Op[i] < MinVal || Op[i] > MaxVal) {
5483 Error(ExprLoc, ErrMsg);
5484 return false;
5485 }
5486 }
5487
5488 return true;
5489}
5490
5491bool
5492AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
5493 using namespace llvm::AMDGPU::Swizzle;
5494
5495 int64_t Lane[LANE_NUM];
5496 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
5497 "expected a 2-bit lane id")) {
5498 Imm = QUAD_PERM_ENC;
Stanislav Mekhanoshin266f1572019-03-11 16:49:32 +00005499 for (unsigned I = 0; I < LANE_NUM; ++I) {
5500 Imm |= Lane[I] << (LANE_SHIFT * I);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00005501 }
5502 return true;
5503 }
5504 return false;
5505}
5506
/// Parse "swizzle(BROADCAST, group_size, lane_id)" operands.
///
/// Encoded as a bitmask permute: the AND mask clears the low
/// log2(group size) lane bits and the OR mask substitutes the chosen
/// lane id within each group.
bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;
  int64_t LaneIdx;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32,
                            "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }
  if (parseSwizzleOperands(1, &LaneIdx,
                           0, GroupSize - 1,
                           "lane id must be in the interval [0,group size - 1]")) {
    // BITMASK_MAX - GroupSize + 1 is the all-ones mask with the low
    // log2(GroupSize) bits cleared.
    Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
    return true;
  }
  return false;
}
5532
5533bool
5534AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
5535 using namespace llvm::AMDGPU::Swizzle;
5536
5537 SMLoc S = Parser.getTok().getLoc();
5538 int64_t GroupSize;
5539
5540 if (!parseSwizzleOperands(1, &GroupSize,
5541 2, 32, "group size must be in the interval [2,32]")) {
5542 return false;
5543 }
5544 if (!isPowerOf2_64(GroupSize)) {
5545 Error(S, "group size must be a power of two");
5546 return false;
5547 }
5548
5549 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
5550 return true;
5551}
5552
5553bool
5554AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
5555 using namespace llvm::AMDGPU::Swizzle;
5556
5557 SMLoc S = Parser.getTok().getLoc();
5558 int64_t GroupSize;
5559
5560 if (!parseSwizzleOperands(1, &GroupSize,
5561 1, 16, "group size must be in the interval [1,16]")) {
5562 return false;
5563 }
5564 if (!isPowerOf2_64(GroupSize)) {
5565 Error(S, "group size must be a power of two");
5566 return false;
5567 }
5568
5569 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
5570 return true;
5571}
5572
/// Parse "swizzle(BITMASK_PERM, "mask")" where mask is a 5-character
/// string, most significant lane-index bit first. Each character
/// controls one bit of the source lane index:
///   '0' - force the bit to 0      '1' - force the bit to 1
///   'p' - preserve the bit        'i' - invert the bit
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
5619
5620bool
5621AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
5622
5623 SMLoc OffsetLoc = Parser.getTok().getLoc();
5624
5625 if (!parseExpr(Imm)) {
5626 return false;
5627 }
5628 if (!isUInt<16>(Imm)) {
5629 Error(OffsetLoc, "expected a 16-bit offset");
5630 return false;
5631 }
5632 return true;
5633}
5634
5635bool
5636AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
5637 using namespace llvm::AMDGPU::Swizzle;
5638
5639 if (skipToken(AsmToken::LParen, "expected a left parentheses")) {
5640
5641 SMLoc ModeLoc = Parser.getTok().getLoc();
5642 bool Ok = false;
5643
5644 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
5645 Ok = parseSwizzleQuadPerm(Imm);
5646 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
5647 Ok = parseSwizzleBitmaskPerm(Imm);
5648 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
5649 Ok = parseSwizzleBroadcast(Imm);
5650 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
5651 Ok = parseSwizzleSwap(Imm);
5652 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
5653 Ok = parseSwizzleReverse(Imm);
5654 } else {
5655 Error(ModeLoc, "expected a swizzle mode");
5656 }
5657
5658 return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
5659 }
5660
5661 return false;
5662}
5663
/// Parse the ds_swizzle_b32 offset operand: either the macro form
/// "offset:swizzle(...)" or a plain "offset:<imm>" value.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
5689
5690bool
5691AMDGPUOperand::isSwizzle() const {
5692 return isImmTy(ImmTySwizzle);
5693}
5694
5695//===----------------------------------------------------------------------===//
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00005696// VGPR Index Mode
5697//===----------------------------------------------------------------------===//
5698
/// Parse the interior of "gpr_idx(...)": a comma-separated list of VGPR
/// index mode names, or an immediately closing parenthesis meaning
/// "off". Returns the resulting mode bitmask; duplicate modes and
/// malformed lists are diagnosed.
int64_t AMDGPUAsmParser::parseGPRIdxMacro() {

  using namespace llvm::AMDGPU::VGPRIndexMode;

  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = Parser.getTok().getLoc();

    // Try each symbolic mode name; each mode is one bit in the mask.
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      break;
    }

    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      break;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      break;
  }

  return Imm;
}
5742
// Parse a VGPR index mode operand: either the symbolic "gpr_idx(<modes>)"
// macro or a bare absolute expression (which must fit in 4 bits).
// Always pushes an ImmTyGprIdxMode operand when anything matched.
OperandMatchResultTy
AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {

  int64_t Imm = 0;
  SMLoc S = Parser.getTok().getLoc();

  // Require the exact shape "gpr_idx(" before committing to the macro
  // form; peekTok avoids consuming tokens on a non-match.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "gpr_idx" &&
      getLexer().peekTok().is(AsmToken::LParen)) {

    // Consume "gpr_idx" and "(".
    Parser.Lex();
    Parser.Lex();

    // If parse failed, trigger an error but do not return error code
    // to avoid excessive error messages.
    Imm = parseGPRIdxMacro();

  } else {
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_NoMatch;
    if (Imm < 0 || !isUInt<4>(Imm)) {
      Error(S, "invalid immediate: only 4-bit values are legal");
    }
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode));
  return MatchOperand_Success;
}
5772
// Matcher predicate: true if this operand is a VGPR index mode immediate.
bool AMDGPUOperand::isGPRIdxMode() const {
  return isImmTy(ImmTyGprIdxMode);
}
5776
5777//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005778// sopp branch targets
5779//===----------------------------------------------------------------------===//
5780
Alex Bradbury58eba092016-11-01 16:32:05 +00005781OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00005782AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00005783
Dmitry Preobrazhensky4ccb7f82019-07-19 13:12:47 +00005784 // Make sure we are not parsing something
5785 // that looks like a label or an expression but is not.
5786 // This will improve error messages.
5787 if (isRegister() || isModifier())
5788 return MatchOperand_NoMatch;
5789
5790 if (parseExpr(Operands)) {
5791
5792 AMDGPUOperand &Opr = ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
5793 assert(Opr.isImm() || Opr.isExpr());
5794 SMLoc Loc = Opr.getStartLoc();
5795
5796 // Currently we do not support arbitrary expressions as branch targets.
5797 // Only labels and absolute expressions are accepted.
5798 if (Opr.isExpr() && !Opr.isSymbolRefExpr()) {
5799 Error(Loc, "expected an absolute expression or a label");
5800 } else if (Opr.isImm() && !Opr.isS16Imm()) {
5801 Error(Loc, "expected a 16-bit signed jump offset");
Tom Stellard45bb48e2015-06-13 03:28:10 +00005802 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005803 }
Dmitry Preobrazhensky4ccb7f82019-07-19 13:12:47 +00005804
5805 return MatchOperand_Success; // avoid excessive error messages
Tom Stellard45bb48e2015-06-13 03:28:10 +00005806}
5807
5808//===----------------------------------------------------------------------===//
Stanislav Mekhanoshin8bcc9bb2019-06-13 19:18:29 +00005809// Boolean holding registers
5810//===----------------------------------------------------------------------===//
5811
// Parse a boolean-holding register operand (see section header above);
// simply defers to the generic register parser.
OperandMatchResultTy
AMDGPUAsmParser::parseBoolReg(OperandVector &Operands) {
  return parseReg(Operands);
}
5816
5817//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005818// mubuf
5819//===----------------------------------------------------------------------===//
5820
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005821AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDLC() const {
5822 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDLC);
5823}
5824
Sam Kolton5f10a132016-05-06 11:31:17 +00005825AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005826 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00005827}
5828
// Default operand for an omitted 'slc' modifier: immediate 0, no location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
5832
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005833void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
5834 const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005835 bool IsAtomic,
5836 bool IsAtomicReturn,
5837 bool IsLds) {
5838 bool IsLdsOpcode = IsLds;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005839 bool HasLdsModifier = false;
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005840 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005841 assert(IsAtomicReturn ? IsAtomic : true);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005842 unsigned FirstOperandIdx = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00005843
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005844 for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00005845 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5846
5847 // Add the register arguments
5848 if (Op.isReg()) {
5849 Op.addRegOperands(Inst, 1);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005850 // Insert a tied src for atomic return dst.
5851 // This cannot be postponed as subsequent calls to
5852 // addImmOperands rely on correct number of MC operands.
5853 if (IsAtomicReturn && i == FirstOperandIdx)
5854 Op.addRegOperands(Inst, 1);
Tom Stellard45bb48e2015-06-13 03:28:10 +00005855 continue;
5856 }
5857
5858 // Handle the case where soffset is an immediate
5859 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5860 Op.addImmOperands(Inst, 1);
5861 continue;
5862 }
5863
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00005864 HasLdsModifier |= Op.isLDS();
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005865
Tom Stellard45bb48e2015-06-13 03:28:10 +00005866 // Handle tokens like 'offen' which are sometimes hard-coded into the
5867 // asm string. There are no MCInst operands for these.
5868 if (Op.isToken()) {
5869 continue;
5870 }
5871 assert(Op.isImm());
5872
5873 // Handle optional arguments
5874 OptionalIdx[Op.getImmTy()] = i;
5875 }
5876
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005877 // This is a workaround for an llvm quirk which may result in an
5878 // incorrect instruction selection. Lds and non-lds versions of
5879 // MUBUF instructions are identical except that lds versions
5880 // have mandatory 'lds' modifier. However this modifier follows
5881 // optional modifiers and llvm asm matcher regards this 'lds'
5882 // modifier as an optional one. As a result, an lds version
5883 // of opcode may be selected even if it has no 'lds' modifier.
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005884 if (IsLdsOpcode && !HasLdsModifier) {
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005885 int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
5886 if (NoLdsOpcode != -1) { // Got lds version - correct it.
5887 Inst.setOpcode(NoLdsOpcode);
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005888 IsLdsOpcode = false;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005889 }
5890 }
5891
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005892 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005893 if (!IsAtomic) { // glc is hard-coded.
5894 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5895 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005896 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005897
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005898 if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005899 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5900 }
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005901
5902 if (isGFX10())
5903 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
Tom Stellard45bb48e2015-06-13 03:28:10 +00005904}
5905
David Stuttard70e8bc12017-06-22 16:29:22 +00005906void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
5907 OptionalImmIndexMap OptionalIdx;
5908
5909 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
5910 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5911
5912 // Add the register arguments
5913 if (Op.isReg()) {
5914 Op.addRegOperands(Inst, 1);
5915 continue;
5916 }
5917
5918 // Handle the case where soffset is an immediate
5919 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5920 Op.addImmOperands(Inst, 1);
5921 continue;
5922 }
5923
5924 // Handle tokens like 'offen' which are sometimes hard-coded into the
5925 // asm string. There are no MCInst operands for these.
5926 if (Op.isToken()) {
5927 continue;
5928 }
5929 assert(Op.isImm());
5930
5931 // Handle optional arguments
5932 OptionalIdx[Op.getImmTy()] = i;
5933 }
5934
5935 addOptionalImmOperand(Inst, Operands, OptionalIdx,
5936 AMDGPUOperand::ImmTyOffset);
Tim Renouf35484c92018-08-21 11:06:05 +00005937 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
David Stuttard70e8bc12017-06-22 16:29:22 +00005938 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5939 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
5940 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005941
5942 if (isGFX10())
5943 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
David Stuttard70e8bc12017-06-22 16:29:22 +00005944}
5945
Tom Stellard45bb48e2015-06-13 03:28:10 +00005946//===----------------------------------------------------------------------===//
5947// mimg
5948//===----------------------------------------------------------------------===//
5949
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005950void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
5951 bool IsAtomic) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00005952 unsigned I = 1;
5953 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5954 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5955 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5956 }
5957
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005958 if (IsAtomic) {
5959 // Add src, same as dst
Dmitry Preobrazhensky0e074e32018-01-19 13:49:53 +00005960 assert(Desc.getNumDefs() == 1);
5961 ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005962 }
5963
Sam Kolton1bdcef72016-05-23 09:59:02 +00005964 OptionalImmIndexMap OptionalIdx;
5965
5966 for (unsigned E = Operands.size(); I != E; ++I) {
5967 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5968
5969 // Add the register arguments
Dmitry Preobrazhensky0e074e32018-01-19 13:49:53 +00005970 if (Op.isReg()) {
5971 Op.addRegOperands(Inst, 1);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005972 } else if (Op.isImmModifier()) {
5973 OptionalIdx[Op.getImmTy()] = I;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005974 } else if (!Op.isToken()) {
Matt Arsenault92b355b2016-11-15 19:34:37 +00005975 llvm_unreachable("unexpected operand type");
Sam Kolton1bdcef72016-05-23 09:59:02 +00005976 }
5977 }
5978
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005979 bool IsGFX10 = isGFX10();
5980
Sam Kolton1bdcef72016-05-23 09:59:02 +00005981 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005982 if (IsGFX10)
5983 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDim, -1);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005984 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00005985 if (IsGFX10)
5986 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005987 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
Dmitry Preobrazhensky0e074e32018-01-19 13:49:53 +00005988 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005989 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005990 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5991 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005992 if (!IsGFX10)
5993 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
Nicolai Haehnlef2674312018-06-21 13:36:01 +00005994 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005995}
5996
// Atomic MIMG conversion: same as cvtMIMG but with the tied dst/src.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  cvtMIMG(Inst, Operands, true);
}
6000
Tom Stellard45bb48e2015-06-13 03:28:10 +00006001//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00006002// smrd
6003//===----------------------------------------------------------------------===//
6004
Artem Tamazov54bfd542016-10-31 16:07:39 +00006005bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00006006 return isImm() && isUInt<8>(getImm());
6007}
6008
Artem Tamazov54bfd542016-10-31 16:07:39 +00006009bool AMDGPUOperand::isSMRDOffset20() const {
6010 return isImm() && isUInt<20>(getImm());
6011}
6012
Tom Stellard217361c2015-08-06 19:28:38 +00006013bool AMDGPUOperand::isSMRDLiteralOffset() const {
6014 // 32-bit literals are only supported on CI and we only want to use them
6015 // when the offset is > 8-bits.
6016 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
6017}
6018
Artem Tamazov54bfd542016-10-31 16:07:39 +00006019AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
6020 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
6021}
6022
// Default operand for an omitted 20-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
6026
// Default operand for an omitted SMRD literal offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
6030
Dmitry Preobrazhensky2eff0312019-07-08 14:27:37 +00006031AMDGPUOperand::Ptr AMDGPUAsmParser::defaultFlatOffset() const {
Matt Arsenault9698f1c2017-06-20 19:54:14 +00006032 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
6033}
6034
Tom Stellard217361c2015-08-06 19:28:38 +00006035//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00006036// vop3
6037//===----------------------------------------------------------------------===//
6038
// Convert an omod "mul:N" value (N in {1, 2, 4}) to its 2-bit hardware
// encoding (0, 1, 2 respectively). Returns false — leaving Mul
// untouched — for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
6046
// Convert an omod "div:N" value to its hardware encoding: div:1 -> 0,
// div:2 -> 3. Any other divisor is rejected and Div is left untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
6060
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006061static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
6062 if (BoundCtrl == 0) {
6063 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00006064 return true;
Matt Arsenault12c53892016-11-15 19:58:54 +00006065 }
6066
6067 if (BoundCtrl == -1) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006068 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00006069 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00006070 }
Matt Arsenault12c53892016-11-15 19:58:54 +00006071
Tom Stellard45bb48e2015-06-13 03:28:10 +00006072 return false;
6073}
6074
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006075// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00006076static const OptionalOperand AMDGPUOptionalOperandTable[] = {
6077 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
6078 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
6079 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
6080 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
6081 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
6082 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00006083 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006084 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00006085 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00006086 {"dlc", AMDGPUOperand::ImmTyDLC, true, nullptr},
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00006087 {"format", AMDGPUOperand::ImmTyFORMAT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006088 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
6089 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
Piotr Sobczak265e94e2019-10-02 17:22:36 +00006090 {"swz", AMDGPUOperand::ImmTySWZ, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006091 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00006092 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00006093 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006094 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
6095 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
6096 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
6097 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
Ryan Taylor1f334d02018-08-28 15:07:30 +00006098 {"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
6099 {"a16", AMDGPUOperand::ImmTyR128A16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006100 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00006101 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006102 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00006103 {"dim", AMDGPUOperand::ImmTyDim, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006104 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
6105 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
6106 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Stanislav Mekhanoshin245b5ba2019-06-12 18:02:41 +00006107 {"fi", AMDGPUOperand::ImmTyDppFi, false, nullptr},
Sam Kolton05ef1c92016-06-03 10:27:37 +00006108 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
6109 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
6110 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00006111 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00006112 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00006113 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00006114 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
6115 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
6116 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00006117 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr},
6118 {"blgp", AMDGPUOperand::ImmTyBLGP, false, nullptr},
6119 {"cbsz", AMDGPUOperand::ImmTyCBSZ, false, nullptr},
6120 {"abid", AMDGPUOperand::ImmTyABID, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006121};
Tom Stellard45bb48e2015-06-13 03:28:10 +00006122
Alex Bradbury58eba092016-11-01 16:32:05 +00006123OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00006124
6125 OperandMatchResultTy res = parseOptionalOpr(Operands);
6126
6127 // This is a hack to enable hardcoded mandatory operands which follow
6128 // optional operands.
6129 //
6130 // Current design assumes that all operands after the first optional operand
6131 // are also optional. However implementation of some instructions violates
6132 // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
6133 //
6134 // To alleviate this problem, we have to (implicitly) parse extra operands
6135 // to make sure autogenerated parser of custom operands never hit hardcoded
6136 // mandatory operands.
6137
Dmitry Preobrazhensky882c3e32019-10-11 14:05:09 +00006138 for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
6139 if (res != MatchOperand_Success ||
6140 isToken(AsmToken::EndOfStatement))
6141 break;
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00006142
Dmitry Preobrazhensky882c3e32019-10-11 14:05:09 +00006143 trySkipToken(AsmToken::Comma);
6144 res = parseOptionalOpr(Operands);
Dmitry Preobrazhensky414e0532017-12-29 13:55:11 +00006145 }
6146
6147 return res;
6148}
6149
// Try to parse exactly one optional operand by walking
// AMDGPUOptionalOperandTable and dispatching each entry to the proper
// specialized parser. Returns the first result that is not NoMatch.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      // Bare flag, e.g. "glc".
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      // Bracketed per-operand lists, e.g. "op_sel:[0,1]".
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else if (Op.Type == AMDGPUOperand::ImmTyDim) {
      res = parseDim(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT && !isGFX10()) {
      res = parseDfmtNfmt(Operands);
    } else {
      // Generic "name:value" form.
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
6183
Matt Arsenault12c53892016-11-15 19:58:54 +00006184OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006185 StringRef Name = Parser.getTok().getString();
6186 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00006187 return parseIntWithPrefix("mul", Operands,
6188 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006189 }
Matt Arsenault12c53892016-11-15 19:58:54 +00006190
6191 if (Name == "div") {
6192 return parseIntWithPrefix("div", Operands,
6193 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
6194 }
6195
6196 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00006197}
6198
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00006199void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
6200 cvtVOP3P(Inst, Operands);
6201
6202 int Opc = Inst.getOpcode();
6203
6204 int SrcNum;
6205 const int Ops[] = { AMDGPU::OpName::src0,
6206 AMDGPU::OpName::src1,
6207 AMDGPU::OpName::src2 };
6208 for (SrcNum = 0;
6209 SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
6210 ++SrcNum);
6211 assert(SrcNum > 0);
6212
6213 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
6214 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
6215
6216 if ((OpSel & (1 << SrcNum)) != 0) {
6217 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
6218 uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
6219 Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
6220 }
6221}
6222
Sam Koltona3ec5c12016-10-07 14:46:06 +00006223static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
6224 // 1. This operand is input modifiers
6225 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
6226 // 2. This is not last operand
6227 && Desc.NumOperands > (OpNum + 1)
6228 // 3. Next operand is register class
6229 && Desc.OpInfo[OpNum + 1].RegClass != -1
6230 // 4. Next register is not tied to any other operand
6231 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
6232}
6233
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00006234void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
6235{
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00006236 OptionalImmIndexMap OptionalIdx;
6237 unsigned Opc = Inst.getOpcode();
6238
6239 unsigned I = 1;
6240 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
6241 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
6242 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
6243 }
6244
6245 for (unsigned E = Operands.size(); I != E; ++I) {
6246 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
6247 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
6248 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
6249 } else if (Op.isInterpSlot() ||
6250 Op.isInterpAttr() ||
6251 Op.isAttrChan()) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00006252 Inst.addOperand(MCOperand::createImm(Op.getImm()));
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00006253 } else if (Op.isImmModifier()) {
6254 OptionalIdx[Op.getImmTy()] = I;
6255 } else {
6256 llvm_unreachable("unhandled operand type");
6257 }
6258 }
6259
6260 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
6261 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
6262 }
6263
6264 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
6265 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
6266 }
6267
6268 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
6269 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
6270 }
6271}
6272
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006273void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
6274 OptionalImmIndexMap &OptionalIdx) {
6275 unsigned Opc = Inst.getOpcode();
6276
Tom Stellarda90b9522016-02-11 03:28:15 +00006277 unsigned I = 1;
6278 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00006279 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00006280 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00006281 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00006282
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006283 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
6284 // This instruction has src modifiers
6285 for (unsigned E = Operands.size(); I != E; ++I) {
6286 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
6287 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
6288 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
6289 } else if (Op.isImmModifier()) {
6290 OptionalIdx[Op.getImmTy()] = I;
6291 } else if (Op.isRegOrImm()) {
6292 Op.addRegOrImmOperands(Inst, 1);
6293 } else {
6294 llvm_unreachable("unhandled operand type");
6295 }
6296 }
6297 } else {
6298 // No src modifiers
6299 for (unsigned E = Operands.size(); I != E; ++I) {
6300 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
6301 if (Op.isMod()) {
6302 OptionalIdx[Op.getImmTy()] = I;
6303 } else {
6304 Op.addRegOrImmOperands(Inst, 1);
6305 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00006306 }
Tom Stellarda90b9522016-02-11 03:28:15 +00006307 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00006308
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006309 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
6310 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
6311 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00006312
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006313 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
6314 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
6315 }
Sam Koltona3ec5c12016-10-07 14:46:06 +00006316
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00006317 // Special case v_mac_{f16, f32} and v_fmac_{f16, f32} (gfx906/gfx10+):
Sam Koltona3ec5c12016-10-07 14:46:06 +00006318 // it has src2 register operand that is tied to dst operand
6319 // we don't allow modifiers for this operand in assembler so src2_modifiers
Matt Arsenault0084adc2018-04-30 19:08:16 +00006320 // should be 0.
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00006321 if (Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
6322 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
Matt Arsenault0084adc2018-04-30 19:08:16 +00006323 Opc == AMDGPU::V_MAC_F32_e64_vi ||
6324 Opc == AMDGPU::V_MAC_F16_e64_vi ||
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00006325 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
6326 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
6327 Opc == AMDGPU::V_FMAC_F16_e64_gfx10) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006328 auto it = Inst.begin();
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006329 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
Sam Koltona3ec5c12016-10-07 14:46:06 +00006330 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
6331 ++it;
6332 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
6333 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00006334}
6335
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006336void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00006337 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00006338 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00006339}
6340
/// Convert parsed VOP3P operands into an MCInst.
///
/// Parses the instruction as an ordinary VOP3 first (via cvtVOP3), then
/// folds the packed-math modifiers (op_sel, op_sel_hi, neg_lo, neg_hi)
/// into the per-source *_modifiers operands, since the encoding carries
/// them as bits of each source modifier rather than as separate operands.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  // Instructions with a vdst_in operand tie it to the destination; replicate
  // the dst register operand in that slot. Only non-packed opcodes have it.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    // Packed ops default op_sel_hi to all-ones (-1); unpacked default to 0.
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    // neg_lo/neg_hi only exist on packed instructions.
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  // Source operands and their matching modifier operands, in order.
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Scatter bit J of each packed-modifier mask into the J-th source's
  // modifier operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    // OR into the existing modifier value so bits already set by cvtVOP3
    // (e.g. per-source modifiers) are preserved.
    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
6423
Sam Koltondfa29f72016-03-09 12:29:31 +00006424//===----------------------------------------------------------------------===//
6425// dpp
6426//===----------------------------------------------------------------------===//
6427
// Returns true if this operand is a parsed dpp8:[...] selector immediate.
bool AMDGPUOperand::isDPP8() const {
  return isImmTy(ImmTyDPP8);
}
6431
Sam Koltondfa29f72016-03-09 12:29:31 +00006432bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006433 using namespace AMDGPU::DPP;
6434
Sam Koltondfa29f72016-03-09 12:29:31 +00006435 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
6436 if (result) {
6437 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00006438 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
6439 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
6440 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
6441 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
6442 (Imm == DppCtrl::WAVE_SHL1) ||
6443 (Imm == DppCtrl::WAVE_ROL1) ||
6444 (Imm == DppCtrl::WAVE_SHR1) ||
6445 (Imm == DppCtrl::WAVE_ROR1) ||
6446 (Imm == DppCtrl::ROW_MIRROR) ||
6447 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
6448 (Imm == DppCtrl::BCAST15) ||
Stanislav Mekhanoshin245b5ba2019-06-12 18:02:41 +00006449 (Imm == DppCtrl::BCAST31) ||
6450 (Imm >= DppCtrl::ROW_SHARE_FIRST && Imm <= DppCtrl::ROW_SHARE_LAST) ||
6451 (Imm >= DppCtrl::ROW_XMASK_FIRST && Imm <= DppCtrl::ROW_XMASK_LAST);
Sam Koltondfa29f72016-03-09 12:29:31 +00006452 }
6453 return false;
6454}
6455
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00006456//===----------------------------------------------------------------------===//
6457// mAI
6458//===----------------------------------------------------------------------===//
6459
6460bool AMDGPUOperand::isBLGP() const {
6461 return isImm() && getImmTy() == ImmTyBLGP && isUInt<3>(getImm());
6462}
6463
6464bool AMDGPUOperand::isCBSZ() const {
6465 return isImm() && getImmTy() == ImmTyCBSZ && isUInt<3>(getImm());
6466}
6467
6468bool AMDGPUOperand::isABID() const {
6469 return isImm() && getImmTy() == ImmTyABID && isUInt<4>(getImm());
6470}
6471
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00006472bool AMDGPUOperand::isS16Imm() const {
6473 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
6474}
6475
6476bool AMDGPUOperand::isU16Imm() const {
6477 return isImm() && isUInt<16>(getImm());
6478}
6479
/// Parse a GFX10 MIMG "dim:<suffix>" operand (e.g. "dim:SQ_RSRC_IMG_2D" or
/// the short form "dim:2D") and push it as an ImmTyDim immediate operand.
/// Returns NoMatch before consuming tokens when the operand is absent, and
/// ParseFail once "dim:" has been committed but the value is malformed.
OperandMatchResultTy AMDGPUAsmParser::parseDim(OperandVector &Operands) {
  if (!isGFX10())
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();

  if (getLexer().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  if (getLexer().getTok().getString() != "dim")
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();

  // We want to allow "dim:1D" etc., but the initial 1 is tokenized as an
  // integer.
  std::string Token;
  if (getLexer().is(AsmToken::Integer)) {
    SMLoc Loc = getLexer().getTok().getEndLoc();
    Token = getLexer().getTok().getString();
    Parser.Lex();
    // The identifier must follow the integer with no gap; otherwise it was
    // not one suffix split by the lexer.
    if (getLexer().getTok().getLoc() != Loc)
      return MatchOperand_ParseFail;
  }
  if (getLexer().isNot(AsmToken::Identifier))
    return MatchOperand_ParseFail;
  Token += getLexer().getTok().getString();

  // Accept both "SQ_RSRC_IMG_2D" and plain "2D" by stripping the prefix.
  StringRef DimId = Token;
  if (DimId.startswith("SQ_RSRC_IMG_"))
    DimId = DimId.substr(12);

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByAsmSuffix(DimId);
  if (!DimInfo)
    return MatchOperand_ParseFail;

  Parser.Lex();

  Operands.push_back(AMDGPUOperand::CreateImm(this, DimInfo->Encoding, S,
                                              AMDGPUOperand::ImmTyDim));
  return MatchOperand_Success;
}
6525
/// Parse a GFX10 "dpp8:[s0,s1,s2,s3,s4,s5,s6,s7]" operand, where each lane
/// selector is in [0,7], and push the packed 24-bit value (3 bits per lane)
/// as an ImmTyDPP8 immediate. Any other identifier falls through to
/// parseDPPCtrl, which handles the classic dpp controls.
OperandMatchResultTy AMDGPUAsmParser::parseDPP8(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  // Not a dpp8 operand: let the regular DPP control parser try it.
  if (Prefix != "dpp8")
    return parseDPPCtrl(Operands);
  if (!isGFX10())
    return MatchOperand_NoMatch;

  // dpp8:[%d,%d,%d,%d,%d,%d,%d,%d]

  int64_t Sels[8];

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getParser().parseAbsoluteExpression(Sels[0]))
    return MatchOperand_ParseFail;
  if (0 > Sels[0] || 7 < Sels[0])
    return MatchOperand_ParseFail;

  // Remaining seven selectors are comma-separated.
  for (size_t i = 1; i < 8; ++i) {
    if (getLexer().isNot(AsmToken::Comma))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(Sels[i]))
      return MatchOperand_ParseFail;
    if (0 > Sels[i] || 7 < Sels[i])
      return MatchOperand_ParseFail;
  }

  if (getLexer().isNot(AsmToken::RBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  // Pack eight 3-bit selectors, lane 0 in the low bits.
  unsigned DPP8 = 0;
  for (size_t i = 0; i < 8; ++i)
    DPP8 |= (Sels[i] << (i * 3));

  Operands.push_back(AMDGPUOperand::CreateImm(this, DPP8, S, AMDGPUOperand::ImmTyDPP8));
  return MatchOperand_Success;
}
6581
/// Parse a classic DPP control operand (quad_perm:[..], row_shl:N,
/// row_mirror, wave_ror:1, row_bcast:15/31, GFX10 row_share:N /
/// row_xmask:N, ...) and push the resulting DppCtrl encoding as an
/// ImmTyDppCtrl immediate. Returns NoMatch (without consuming tokens)
/// when the identifier is not a DPP control or is not available on the
/// current subtarget, and ParseFail for malformed values.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  using namespace AMDGPU::DPP;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = DppCtrl::ROW_MIRROR;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = DppCtrl::ROW_HALF_MIRROR;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast"
        && Prefix != "row_share"
        && Prefix != "row_xmask") {
      return MatchOperand_NoMatch;
    }

    // row_share / row_xmask are GFX10-only controls.
    if (!isGFX10() && (Prefix == "row_share" || Prefix == "row_xmask"))
      return MatchOperand_NoMatch;

    // wave_* shifts/rotates and row_bcast exist only on VI and GFX9.
    if (!isVI() && !isGFX9() &&
        (Prefix == "wave_shl" || Prefix == "wave_shr" ||
         Prefix == "wave_rol" || Prefix == "wave_ror" ||
         Prefix == "row_bcast"))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First selector seeds the low two bits of the encoding.
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Three more selectors, each packed into the next 2-bit field.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();
    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Fold the parsed count into the control's base encoding, validating
      // the legal range for each control kind.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHL0;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHR0;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_ROR0;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = DppCtrl::WAVE_SHL1;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = DppCtrl::WAVE_ROL1;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = DppCtrl::WAVE_SHR1;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = DppCtrl::WAVE_ROR1;
      } else if (Prefix == "row_bcast") {
        // Only broadcast-of-15 and broadcast-of-31 are encodable.
        if (Int == 15) {
          Int = DppCtrl::BCAST15;
        } else if (Int == 31) {
          Int = DppCtrl::BCAST31;
        } else {
          return MatchOperand_ParseFail;
        }
      } else if (Prefix == "row_share" && 0 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHARE_FIRST;
      } else if (Prefix == "row_xmask" && 0 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_XMASK_FIRST;
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
6697
Sam Kolton5f10a132016-05-06 11:31:17 +00006698AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006699 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00006700}
6701
David Stuttard20ea21c2019-03-12 09:52:58 +00006702AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
6703 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
6704}
6705
Sam Kolton5f10a132016-05-06 11:31:17 +00006706AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006707 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00006708}
6709
Sam Kolton5f10a132016-05-06 11:31:17 +00006710AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006711 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00006712}
6713
Stanislav Mekhanoshin245b5ba2019-06-12 18:02:41 +00006714AMDGPUOperand::Ptr AMDGPUAsmParser::defaultFI() const {
6715 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppFi);
6716}
6717
/// Convert parsed DPP (or GFX10 DPP8, when IsDPP8 is set) operands into an
/// MCInst: replicate tied operands, drop the textual "vcc" of VOP2b forms,
/// and append the trailing control immediates (dpp8/fi, or
/// row_mask/bank_mask/bound_ctrl/fi with their defaults).
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8) {
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic; copy the defs (destination registers) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  int Fi = 0;
  for (unsigned E = Operands.size(); I != E; ++I) {
    // If the operand slot we are about to fill is tied to an earlier
    // operand, duplicate that operand before adding the parsed one.
    auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
                                            MCOI::TIED_TO);
    if (TiedTo != -1) {
      assert((unsigned)TiedTo < Inst.getNumOperands());
      // handle tied old or src2 for MAC instructions
      Inst.addOperand(Inst.getOperand(TiedTo));
    }
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (Op.isReg() && validateVccOperand(Op.getReg())) {
      // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
      // Skip it.
      continue;
    }

    if (IsDPP8) {
      if (Op.isDPP8()) {
        Op.addImmOperands(Inst, 1);
      } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        Op.addRegWithFPInputModsOperands(Inst, 2);
      } else if (Op.isFI()) {
        // fi is emitted after the loop as part of the dpp8 trailing imm.
        Fi = Op.getImm();
      } else if (Op.isReg()) {
        Op.addRegOperands(Inst, 1);
      } else {
        llvm_unreachable("Invalid operand type");
      }
    } else {
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        Op.addRegWithFPInputModsOperands(Inst, 2);
      } else if (Op.isDPPCtrl()) {
        Op.addImmOperands(Inst, 1);
      } else if (Op.isImm()) {
        // Handle optional arguments
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        llvm_unreachable("Invalid operand type");
      }
    }
  }

  if (IsDPP8) {
    using namespace llvm::AMDGPU::DPP;
    Inst.addOperand(MCOperand::createImm(Fi? DPP8_FI_1 : DPP8_FI_0));
  } else {
    // Classic DPP: append optional controls with their documented defaults.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
    if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::fi) != -1) {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppFi);
    }
  }
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00006782
Sam Kolton3025e7f2016-04-26 13:33:56 +00006783//===----------------------------------------------------------------------===//
6784// sdwa
6785//===----------------------------------------------------------------------===//
6786
Alex Bradbury58eba092016-11-01 16:32:05 +00006787OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00006788AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
6789 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006790 using namespace llvm::AMDGPU::SDWA;
6791
Sam Kolton3025e7f2016-04-26 13:33:56 +00006792 SMLoc S = Parser.getTok().getLoc();
6793 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00006794 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00006795
Sam Kolton05ef1c92016-06-03 10:27:37 +00006796 res = parseStringWithPrefix(Prefix, Value);
6797 if (res != MatchOperand_Success) {
6798 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00006799 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00006800
Sam Kolton3025e7f2016-04-26 13:33:56 +00006801 int64_t Int;
6802 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00006803 .Case("BYTE_0", SdwaSel::BYTE_0)
6804 .Case("BYTE_1", SdwaSel::BYTE_1)
6805 .Case("BYTE_2", SdwaSel::BYTE_2)
6806 .Case("BYTE_3", SdwaSel::BYTE_3)
6807 .Case("WORD_0", SdwaSel::WORD_0)
6808 .Case("WORD_1", SdwaSel::WORD_1)
6809 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00006810 .Default(0xffffffff);
6811 Parser.Lex(); // eat last token
6812
6813 if (Int == 0xffffffff) {
6814 return MatchOperand_ParseFail;
6815 }
6816
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006817 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00006818 return MatchOperand_Success;
6819}
6820
Alex Bradbury58eba092016-11-01 16:32:05 +00006821OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00006822AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00006823 using namespace llvm::AMDGPU::SDWA;
6824
Sam Kolton3025e7f2016-04-26 13:33:56 +00006825 SMLoc S = Parser.getTok().getLoc();
6826 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00006827 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00006828
6829 res = parseStringWithPrefix("dst_unused", Value);
6830 if (res != MatchOperand_Success) {
6831 return res;
6832 }
6833
6834 int64_t Int;
6835 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00006836 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
6837 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
6838 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00006839 .Default(0xffffffff);
6840 Parser.Lex(); // eat last token
6841
6842 if (Int == 0xffffffff) {
6843 return MatchOperand_ParseFail;
6844 }
6845
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006846 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00006847 return MatchOperand_Success;
6848}
6849
// Convert a parsed VOP1 SDWA instruction (no VCC operands to skip).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
6853
// Convert a parsed VOP2 SDWA instruction (no VCC operands to skip).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
6857
// Convert a parsed VOP2b SDWA instruction: skip both the textual VCC
// destination and the VCC source carry-in operand.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true, true);
}
6861
// Convert a parsed VOP2e SDWA instruction: skip only the VCC source
// operand, not the destination.
void AMDGPUAsmParser::cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, false, true);
}
6865
// Convert a parsed VOPC SDWA instruction; on VI the textual VCC destination
// is skipped (isVI() selects the SkipDstVcc behavior in cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
6869
/// Convert parsed SDWA operands into an MCInst.
///
/// \param BasicInstType underlying encoding family (VOP1/VOP2/VOPC),
///        which decides which optional sdwa immediates are appended.
/// \param SkipDstVcc   drop a textual "vcc" destination operand.
/// \param SkipSrcVcc   drop a textual "vcc" carry-in source operand.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType,
                              bool SkipDstVcc,
                              bool SkipSrcVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool SkipVcc = SkipDstVcc || SkipSrcVcc;
  bool SkippedVcc = false;

  // Operands[0] is the mnemonic; copy the defs (destination registers) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (SkipVcc && !SkippedVcc && Op.isReg() &&
        (Op.getReg() == AMDGPU::VCC || Op.getReg() == AMDGPU::VCC_LO)) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      // Note that src0 and src1 occupy 2 slots each because of modifiers.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          ((SkipDstVcc && Inst.getNumOperands() == 1) ||
           (SkipSrcVcc && Inst.getNumOperands() == 5))) {
        SkippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        SkippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    SkippedVcc = false;
  }

  // Append the optional sdwa immediates (clamp/omod and the sel/unused
  // controls) with their defaults; v_nop has none of them.
  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx10 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_sdwa_vi/gfx9 has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::clamp) != -1)
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00006965
Stanislav Mekhanoshin9e77d0c2019-07-09 19:41:51 +00006966//===----------------------------------------------------------------------===//
6967// mAI
6968//===----------------------------------------------------------------------===//
6969
6970AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBLGP() const {
6971 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyBLGP);
6972}
6973
6974AMDGPUOperand::Ptr AMDGPUAsmParser::defaultCBSZ() const {
6975 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyCBSZ);
6976}
6977
6978AMDGPUOperand::Ptr AMDGPUAsmParser::defaultABID() const {
6979 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyABID);
6980}
6981
/// Force static initialization.
/// Registers this asm parser with both the R600 (AMDGPU) and GCN targets so
/// the MC layer can instantiate it by target triple.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
6987
6988#define GET_REGISTER_MATCHER
6989#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00006990#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00006991#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00006992
Sam Kolton11de3702016-05-24 12:38:33 +00006993// This fuction should be defined after auto-generated include so that we have
6994// MatchClassKind enum defined
6995unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
6996 unsigned Kind) {
6997 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00006998 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00006999 // operand. This method checks if we are given immediate operand but expect to
7000 // get corresponding token.
7001 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
7002 switch (Kind) {
7003 case MCK_addr64:
7004 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
7005 case MCK_gds:
7006 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00007007 case MCK_lds:
7008 return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00007009 case MCK_glc:
7010 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
7011 case MCK_idxen:
7012 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
7013 case MCK_offen:
7014 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00007015 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00007016 // When operands have expression values, they will return true for isToken,
7017 // because it is not possible to distinguish between a token and an
7018 // expression at parse time. MatchInstructionImpl() will always try to
7019 // match an operand as a token, when isToken returns true, and when the
7020 // name of the expression is not a valid token, the match will fail,
7021 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00007022 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
7023 case MCK_SSrcF32:
7024 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00007025 case MCK_SoppBrTarget:
7026 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00007027 case MCK_VReg32OrOff:
7028 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00007029 case MCK_InterpSlot:
7030 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
7031 case MCK_Attr:
7032 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
7033 case MCK_AttrChan:
7034 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhensky472c6b02019-10-11 14:35:11 +00007035 case MCK_SReg_64:
7036 case MCK_SReg_64_XEXEC:
7037 // Null is defined as a 32-bit register but
7038 // it should also be enabled with 64-bit operands.
7039 // The following code enables it for SReg_64 operands
7040 // used as source and destination. Remaining source
7041 // operands are handled in isInlinableImm.
7042 return Operand.isNull() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00007043 default:
7044 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00007045 }
7046}
David Stuttard20ea21c2019-03-12 09:52:58 +00007047
7048//===----------------------------------------------------------------------===//
7049// endpgm
7050//===----------------------------------------------------------------------===//
7051
7052OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
7053 SMLoc S = Parser.getTok().getLoc();
7054 int64_t Imm = 0;
7055
7056 if (!parseExpr(Imm)) {
7057 // The operand is optional, if not present default to 0
7058 Imm = 0;
7059 }
7060
7061 if (!isUInt<16>(Imm)) {
7062 Error(S, "expected a 16-bit value");
7063 return MatchOperand_ParseFail;
7064 }
7065
7066 Operands.push_back(
7067 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
7068 return MatchOperand_Success;
7069}
7070
7071bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }