blob: 3721c1e057ecc9975685a7668470fbcc85683866 [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Tom Stellard45bb48e2015-06-13 03:28:10 +00006//
7//===----------------------------------------------------------------------===//
8
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00009#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000014#include "SIInstrInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000015#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000016#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000017#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000018#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000019#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000020#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000021#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000022#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000023#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000024#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000025#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000027#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000028#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000032#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000033#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCParser/MCAsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000036#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000038#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000042#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000043#include "llvm/Support/AMDGPUMetadata.h"
Scott Linder1e8c2c72018-06-21 19:38:56 +000044#include "llvm/Support/AMDHSAKernelDescriptor.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000045#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000046#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000047#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000048#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000049#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000050#include "llvm/Support/SMLoc.h"
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +000051#include "llvm/Support/TargetParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000052#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000053#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000054#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <cstring>
58#include <iterator>
59#include <map>
60#include <memory>
61#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000062
Tom Stellard45bb48e2015-06-13 03:28:10 +000063using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000064using namespace llvm::AMDGPU;
Scott Linder1e8c2c72018-06-21 19:38:56 +000065using namespace llvm::amdhsa;
Tom Stellard45bb48e2015-06-13 03:28:10 +000066
67namespace {
68
Sam Kolton1eeb11b2016-09-09 14:44:04 +000069class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000070
// Classifies a parsed register by architectural register file: vector GPR,
// scalar GPR, trap-temporary, or a special register (anything else named).
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
72
Sam Kolton1eeb11b2016-09-09 14:44:04 +000073//===----------------------------------------------------------------------===//
74// Operand
75//===----------------------------------------------------------------------===//
76
// A single operand parsed from AMDGPU assembly: a token (e.g. 'gds'), an
// immediate (possibly carrying input modifiers), a register, or an as-yet
// unresolved expression.
class AMDGPUOperand : public MCParsedAsmOperand {
  // Discriminator for the payload union declared further below.
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  // Source range of the operand text, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Parser that created this operand; NOTE(review): presumably used by the
  // out-of-line predicates (isInlinableImm etc.) to query target info — confirm.
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000093
Sam Kolton945231a2016-06-10 09:57:59 +000094 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000095 bool Abs = false;
96 bool Neg = false;
97 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000098
99 bool hasFPModifiers() const { return Abs || Neg; }
100 bool hasIntModifiers() const { return Sext; }
101 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
102
103 int64_t getFPModifiersOperand() const {
104 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000105 Operand |= Abs ? SISrcMods::ABS : 0u;
106 Operand |= Neg ? SISrcMods::NEG : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000107 return Operand;
108 }
109
110 int64_t getIntModifiersOperand() const {
111 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000112 Operand |= Sext ? SISrcMods::SEXT : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000113 return Operand;
114 }
115
116 int64_t getModifiersOperand() const {
117 assert(!(hasFPModifiers() && hasIntModifiers())
118 && "fp and int modifiers should not be used simultaneously");
119 if (hasFPModifiers()) {
120 return getFPModifiersOperand();
121 } else if (hasIntModifiers()) {
122 return getIntModifiersOperand();
123 } else {
124 return 0;
125 }
126 }
127
128 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
129 };
130
  // Kind tag for immediate operands: identifies which named instruction
  // modifier/field (offset, glc, dmask, sdwa selectors, ...) an immediate
  // represents, or ImmTyNone for a plain literal. Do not reorder — the
  // enumerator values distinguish operand flavors throughout the parser.
  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyLDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyD16,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128A16,
    ImmTyLWE,
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyFORMAT,
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyGprIdxMode,
    ImmTyEndpgm,
    ImmTyHigh
  };
180
private:
  // Payload for Kind == Token: a non-owning view of the token text.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  // Payload for Kind == Immediate. For FP immediates (IsFPImm) Val holds the
  // bit pattern.
  struct ImmOp {
    int64_t Val;
    ImmTy Type;
    bool IsFPImm;
    Modifiers Mods;
  };

  // Payload for Kind == Register.
  struct RegOp {
    unsigned RegNo;
    Modifiers Mods;
  };

  // Anonymous payload union; which member is active is determined by Kind.
  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

public:
Tom Stellard45bb48e2015-06-13 03:28:10 +0000207 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000208 if (Kind == Token)
209 return true;
210
211 if (Kind != Expression || !Expr)
212 return false;
213
214 // When parsing operands, we can't always tell if something was meant to be
215 // a token, like 'gds', or an expression that references a global variable.
216 // In this case, we assume the string is an expression, and if we need to
217 // interpret is a token, then we treat the symbol name as the token.
218 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000219 }
220
  bool isImm() const override {
    return Kind == Immediate;
  }

  // Defined out of line: need the parser/subtarget to classify immediates.
  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;

  bool isRegKind() const {
    return Kind == Register;
  }

  // A "plain" register for the generic matcher: no input modifiers attached.
  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }

  // True if the operand can be encoded, with input modifiers, as a member of
  // register class RCID or as an inline immediate of the given type.
  bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
    return isRegClass(RCID) || isInlinableImm(type);
  }

  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }
263
Sam Kolton9772eb32017-01-11 11:46:30 +0000264 bool isVReg() const {
265 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
266 isRegClass(AMDGPU::VReg_64RegClassID) ||
267 isRegClass(AMDGPU::VReg_96RegClassID) ||
268 isRegClass(AMDGPU::VReg_128RegClassID) ||
269 isRegClass(AMDGPU::VReg_256RegClassID) ||
270 isRegClass(AMDGPU::VReg_512RegClassID);
271 }
272
  bool isVReg32() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  bool isVReg32OrOff() const {
    return isOff() || isVReg32();
  }

  // SDWA operand classification; defined out of line.
  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  // True if this is an immediate carrying the given kind tag.
  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  // True if this immediate represents a named instruction modifier rather
  // than a plain literal.
  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  // One predicate per named modifier; some additionally range-check the value.
  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
  bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }
345
  // Defined out of line.
  bool isRegClass(unsigned RCID) const;

  bool isInlineValue() const;

  // Like isRegOrImmWithInputMods, but additionally rejects operands that
  // carry input modifiers.
  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }
353
  // Scalar (SALU) source predicates. SCSrc* = SGPR or inline constant only;
  // SSrc* additionally accepts a 32-bit literal (and, for B32/F32, a
  // relocatable expression).
  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  // Never queried by the matcher; present only to satisfy generated code.
  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  // Never queried by the matcher; present only to satisfy generated code.
  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }

  bool isSSrcOrLdsB32() const {
    return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
           isLiteralImm(MVT::i32) || isExpr();
  }
426
  // Vector (VALU) source predicates. VCSrc* = VGPR/SGPR or inline constant;
  // VSrc* additionally accepts a literal (and, for B32/F32, an expression).
  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  // Never queried by the matcher; present only to satisfy generated code.
  bool isVSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isVSrcB16();
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  // Never queried by the matcher; present only to satisfy generated code.
  bool isVSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isVSrcF16();
  }

  // 32-bit FP literal encoded in the instruction (KIMM operand).
  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }
500
  // AMDGPU assembly has no memory operands in the MC sense.
  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  // SOPP branch targets may be either a resolved immediate or an expression.
  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  // Operand-format predicates defined out of line (need target knowledge).
  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
  bool isEndpgm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000525
Tom Stellard89049702016-06-15 02:54:14 +0000526 StringRef getExpressionAsToken() const {
527 assert(isExpr());
528 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
529 return S->getSymbol().getName();
530 }
531
Sam Kolton945231a2016-06-10 09:57:59 +0000532 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000533 assert(isToken());
534
535 if (Kind == Expression)
536 return getExpressionAsToken();
537
Sam Kolton945231a2016-06-10 09:57:59 +0000538 return StringRef(Tok.Data, Tok.Length);
539 }
540
  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    assert(isRegKind());
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  // Full source range of the operand, for diagnostics.
  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }
567
  // Modifiers live in the active union member; only registers and plain
  // (ImmTyNone) immediates may carry them.
  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }
592
  // Out-of-line helpers that append this operand to an MCInst.
  uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;

  template <unsigned Bitwidth>
  void addKImmFPOperands(MCInst &Inst, unsigned N) const;

  void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<16>(Inst, N);
  }

  void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<32>(Inst, N);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000611
612 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
613 if (isRegKind())
614 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000615 else if (isExpr())
616 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000617 else
618 addImmOperands(Inst, N);
619 }
620
621 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
622 Modifiers Mods = getModifiers();
623 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
624 if (isRegKind()) {
625 addRegOperands(Inst, N);
626 } else {
627 addImmOperands(Inst, N, false);
628 }
629 }
630
  // FP/Int wrappers assert the operand carries only the matching modifier
  // family before delegating.
  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  // Register-only variant: modifier mask followed by the register.
  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    assert(isRegKind());
    addRegOperands(Inst, N);
  }

  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
657
Sam Kolton945231a2016-06-10 09:57:59 +0000658 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
659 if (isImm())
660 addImmOperands(Inst, N);
661 else {
662 assert(isExpr());
663 Inst.addOperand(MCOperand::createExpr(Expr));
664 }
665 }
666
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000667 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000668 switch (Type) {
669 case ImmTyNone: OS << "None"; break;
670 case ImmTyGDS: OS << "GDS"; break;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000671 case ImmTyLDS: OS << "LDS"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000672 case ImmTyOffen: OS << "Offen"; break;
673 case ImmTyIdxen: OS << "Idxen"; break;
674 case ImmTyAddr64: OS << "Addr64"; break;
675 case ImmTyOffset: OS << "Offset"; break;
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000676 case ImmTyInstOffset: OS << "InstOffset"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000677 case ImmTyOffset0: OS << "Offset0"; break;
678 case ImmTyOffset1: OS << "Offset1"; break;
679 case ImmTyGLC: OS << "GLC"; break;
680 case ImmTySLC: OS << "SLC"; break;
681 case ImmTyTFE: OS << "TFE"; break;
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000682 case ImmTyD16: OS << "D16"; break;
Tim Renouf35484c92018-08-21 11:06:05 +0000683 case ImmTyFORMAT: OS << "FORMAT"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000684 case ImmTyClampSI: OS << "ClampSI"; break;
685 case ImmTyOModSI: OS << "OModSI"; break;
686 case ImmTyDppCtrl: OS << "DppCtrl"; break;
687 case ImmTyDppRowMask: OS << "DppRowMask"; break;
688 case ImmTyDppBankMask: OS << "DppBankMask"; break;
689 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000690 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
691 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
692 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000693 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
694 case ImmTyDMask: OS << "DMask"; break;
695 case ImmTyUNorm: OS << "UNorm"; break;
696 case ImmTyDA: OS << "DA"; break;
Ryan Taylor1f334d02018-08-28 15:07:30 +0000697 case ImmTyR128A16: OS << "R128A16"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000698 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000699 case ImmTyOff: OS << "Off"; break;
700 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000701 case ImmTyExpCompr: OS << "ExpCompr"; break;
702 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000703 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000704 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000705 case ImmTyInterpSlot: OS << "InterpSlot"; break;
706 case ImmTyInterpAttr: OS << "InterpAttr"; break;
707 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000708 case ImmTyOpSel: OS << "OpSel"; break;
709 case ImmTyOpSelHi: OS << "OpSelHi"; break;
710 case ImmTyNegLo: OS << "NegLo"; break;
711 case ImmTyNegHi: OS << "NegHi"; break;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000712 case ImmTySwizzle: OS << "Swizzle"; break;
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +0000713 case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000714 case ImmTyHigh: OS << "High"; break;
David Stuttard20ea21c2019-03-12 09:52:58 +0000715 case ImmTyEndpgm:
716 OS << "Endpgm";
717 break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000718 }
719 }
720
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000721 void print(raw_ostream &OS) const override {
722 switch (Kind) {
723 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000724 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000725 break;
726 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000727 OS << '<' << getImm();
728 if (getImmTy() != ImmTyNone) {
729 OS << " type: "; printImmTy(OS, getImmTy());
730 }
Sam Kolton945231a2016-06-10 09:57:59 +0000731 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000732 break;
733 case Token:
734 OS << '\'' << getToken() << '\'';
735 break;
736 case Expression:
737 OS << "<expr " << *Expr << '>';
738 break;
739 }
740 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000741
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000742 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
743 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000744 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000745 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000746 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000747 Op->Imm.Val = Val;
748 Op->Imm.IsFPImm = IsFPImm;
749 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000750 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000751 Op->StartLoc = Loc;
752 Op->EndLoc = Loc;
753 return Op;
754 }
755
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000756 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
757 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000758 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000759 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000760 Res->Tok.Data = Str.data();
761 Res->Tok.Length = Str.size();
762 Res->StartLoc = Loc;
763 Res->EndLoc = Loc;
764 return Res;
765 }
766
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000767 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
768 unsigned RegNo, SMLoc S,
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +0000769 SMLoc E) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000770 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000771 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000772 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000773 Op->StartLoc = S;
774 Op->EndLoc = E;
775 return Op;
776 }
777
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000778 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
779 const class MCExpr *Expr, SMLoc S) {
780 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000781 Op->Expr = Expr;
782 Op->StartLoc = S;
783 Op->EndLoc = S;
784 return Op;
785 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000786};
787
Sam Kolton945231a2016-06-10 09:57:59 +0000788raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
789 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
790 return OS;
791}
792
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000793//===----------------------------------------------------------------------===//
794// AsmParser
795//===----------------------------------------------------------------------===//
796
Artem Tamazova01cce82016-12-27 16:00:11 +0000797// Holds info related to the current kernel, e.g. count of SGPRs used.
798// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
799// .amdgpu_hsa_kernel or at EOF.
800class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000801 int SgprIndexUnusedMin = -1;
802 int VgprIndexUnusedMin = -1;
803 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000804
805 void usesSgprAt(int i) {
806 if (i >= SgprIndexUnusedMin) {
807 SgprIndexUnusedMin = ++i;
808 if (Ctx) {
809 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
810 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
811 }
812 }
813 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000814
Artem Tamazova01cce82016-12-27 16:00:11 +0000815 void usesVgprAt(int i) {
816 if (i >= VgprIndexUnusedMin) {
817 VgprIndexUnusedMin = ++i;
818 if (Ctx) {
819 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
820 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
821 }
822 }
823 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000824
Artem Tamazova01cce82016-12-27 16:00:11 +0000825public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000826 KernelScopeInfo() = default;
827
Artem Tamazova01cce82016-12-27 16:00:11 +0000828 void initialize(MCContext &Context) {
829 Ctx = &Context;
830 usesSgprAt(SgprIndexUnusedMin = -1);
831 usesVgprAt(VgprIndexUnusedMin = -1);
832 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000833
Artem Tamazova01cce82016-12-27 16:00:11 +0000834 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
835 switch (RegKind) {
836 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
837 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
838 default: break;
839 }
840 }
841};
842
// MC-layer assembly parser for the AMDGPU target. Mostly declarations;
// the definitions live later in this file.
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCAsmParser &Parser;

  // Number of extra operands parsed after the first optional operand.
  // This may be necessary to skip hardcoded mandatory operands.
  static const unsigned MAX_OPR_LOOKAHEAD = 8;

  unsigned ForcedEncodingSize = 0;  // 0 = no forced encoding; 64 forces VOP3.
  bool ForcedDPP = false;           // Mnemonic suffix forced DPP encoding.
  bool ForcedSDWA = false;          // Mnemonic suffix forced SDWA encoding.
  KernelScopeInfo KernelScope;      // Per-kernel register usage tracking.

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool OutOfRangeError(SMRange Range);
  /// Calculate VGPR/SGPR blocks required for given target, reserved
  /// registers, and user-specified NextFreeXGPR values.
  ///
  /// \param Features [in] Target features, used for bug corrections.
  /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
  /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
  /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
  /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
  /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
  /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
  /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
  /// \param VGPRBlocks [out] Result VGPR block count.
  /// \param SGPRBlocks [out] Result SGPR block count.
  bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed,
                          unsigned NextFreeVGPR, SMRange VGPRRange,
                          unsigned NextFreeSGPR, SMRange SGPRRange,
                          unsigned &VGPRBlocks, unsigned &SGPRBlocks);
  // Assembler-directive handlers (.amdgcn_target, .amd_kernel_code_t, ...).
  bool ParseDirectiveAMDGCNTarget();
  bool ParseDirectiveAMDHSAKernel();
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadataBegin();
  bool ParseDirectivePALMetadata();

  /// Common code to parse out a block of text (typically YAML) between start and
  /// end directives.
  bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
                           const char *AssemblerDirectiveEnd,
                           std::string &CollectString);

  // Register-parsing helpers.
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  bool isRegister();
  bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
  Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
  void initializeGprCountSymbol(RegisterKind RegKind);
  bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                             unsigned RegWidth);
  // Shared operand-to-MCInst conversion used by the cvtMubuf*/cvtDS* wrappers.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("southern-islands");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
      MCContext &Ctx = getContext();
      // Code-object-v3 targets use .amdgcn.* symbol names; older ones use
      // .option.machine_version_*.
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      } else {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }

  // Subtarget feature queries, all derived from the current MCSubtargetInfo.
  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool isGFX10() const {
    return AMDGPU::isGFX10(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI() && !isGFX9();
  }

  bool hasSGPR104_SGPR105() const {
    return isGFX10();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  // Forced-encoding state set while parsing mnemonic suffixes (_e64/_dpp/_sdwa).
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  // Scalar operand parsing (immediates, registers, input modifiers).
  bool parseAbsoluteExpr(int64_t &Val, bool HasSP3AbsModifier = false);
  bool parseSP3NegModifier();
  OperandMatchResultTy parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
  OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-parse validation of the matched MCInst (diagnosed at IDLoc).
  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateSOPLiteral(const MCInst &Inst) const;
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst);
  bool validateMIMGD16(const MCInst &Inst);
  bool validateLdsDirect(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  // Low-level token-stream helpers shared by the operand parsers.
  bool isId(const StringRef Id) const;
  bool isId(const AsmToken &Token, const StringRef Id) const;
  bool isToken(const AsmToken::TokenKind Kind) const;
  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  void peekTokens(MutableArrayRef<AsmToken> Tokens);
  AsmToken::TokenKind getTokenKind() const;
  bool parseExpr(int64_t &Imm);
  StringRef getTokenStr() const;
  AsmToken peekToken();
  AsmToken getToken() const;
  SMLoc getLoc() const;
  void lex();

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // swizzle() macro operand parsing.
  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
  int64_t parseGPRIdxMacro();

  // MUBUF/MTBUF conversion wrappers over cvtMubufImpl.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  // Default (omitted) operand factories used by the generated matcher.
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;

  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
               bool IsAtomic = false);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);

  OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
};
1229
// Describes one optional named operand (e.g. "clamp", "offset:N") that may
// trail an instruction's required operands during matching.
struct OptionalOperand {
  const char *Name;                 // Operand name as spelled in assembly.
  AMDGPUOperand::ImmTy Type;        // Immediate kind the operand maps to.
  bool IsBit;                       // True if this is a simple flag bit.
  bool (*ConvertResult)(int64_t&);  // Optional hook to validate/convert the parsed value.
};
1236
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001237} // end anonymous namespace
1238
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001239// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001240static const fltSemantics *getFltSemantics(unsigned Size) {
1241 switch (Size) {
1242 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001243 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001244 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001245 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001246 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001247 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001248 default:
1249 llvm_unreachable("unsupported fp type");
1250 }
1251}
1252
// Maps a machine value type to its IEEE semantics via the type's byte size.
static const fltSemantics *getFltSemantics(MVT VT) {
  return getFltSemantics(VT.getSizeInBits() / 8);
}
1256
// Returns the IEEE float semantics matching the encoded width of an
// instruction operand type (used when converting parsed FP literals to
// the operand's representation).
static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    return &APFloat::IEEEsingle();
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return &APFloat::IEEEdouble();
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  // Packed 16-bit operands use half semantics per element.
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
1280
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001281//===----------------------------------------------------------------------===//
1282// Operand
1283//===----------------------------------------------------------------------===//
1284
// Returns true if FPLiteral can be converted to the FP type matching VT
// without overflow or underflow. Precision loss alone is accepted.
static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool Lost;

  // Convert literal to single precision
  APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
                                               APFloat::rmNearestTiesToEven,
                                               &Lost);
  // We allow precision lost but not overflow or underflow
  // NOTE(review): rejection additionally requires Lost to be set, so an
  // overflow/underflow status reported without Lost would be accepted —
  // confirm this combination cannot occur or is intended.
  if (Status != APFloat::opOK &&
      Lost &&
      ((Status & APFloat::opOverflow) != 0 ||
       (Status & APFloat::opUnderflow) != 0)) {
    return false;
  }

  return true;
}
1302
// Returns true if Val fits into Size bits under either an unsigned or a
// signed interpretation, i.e. truncating to Size bits loses no information.
static bool isSafeTruncation(int64_t Val, unsigned Size) {
  return isUIntN(Size, Val) || isIntN(Size, Val);
}
1306
// Returns true if this immediate operand can be encoded as an inline
// constant for an operand of machine value type 'type' (64-, 32- or
// 16-bit, scalar or packed).
bool AMDGPUOperand::isInlinableImm(MVT type) const {

  // This is a hack to enable named inline values like
  // shared_base with both 32-bit and 64-bit operands.
  // Note that these values are defined as
  // 32-bit operands only.
  if (isInlineValue()) {
    return true;
  }

  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operand: convert the 64-bit FP token down and check the
    // converted bit pattern.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // The literal must also fit in the operand's width to be inlinable.
  if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) {
    return false;
  }

  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1369
// Returns true if this immediate can be encoded as a (non-inline) literal
// constant for an operand of machine value type 'type'.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isSafeTruncation(Imm.Val, Size);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept this literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower FP operand: accept if the token converts without
  // overflow/underflow.
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1410
// Returns true if this operand is a register that belongs to the register
// class with the given ID.
bool AMDGPUOperand::isRegClass(unsigned RCID) const {
  return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
}
1414
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001415bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001416 if (AsmParser->isVI())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001417 return isVReg32();
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +00001418 else if (AsmParser->isGFX9() || AsmParser->isGFX10())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001419 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001420 else
1421 return false;
1422}
1423
// Typed SDWA operand predicates; each delegates to isSDWAOperand() with the
// element type expected by the instruction definition.
bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}

bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}

bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}

bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}
1439
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001440uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1441{
1442 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1443 assert(Size == 2 || Size == 4 || Size == 8);
1444
1445 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1446
1447 if (Imm.Mods.Abs) {
1448 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001449 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001450 if (Imm.Mods.Neg) {
1451 Val ^= FpSignMask;
1452 }
1453
1454 return Val;
1455}
1456
1457void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001458 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1459 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001460 addLiteralImmOperand(Inst, Imm.Val,
1461 ApplyModifiers &
1462 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001463 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001464 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1465 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001466 }
1467}
1468
// Encodes a literal immediate Val into Inst for the next operand slot,
// picking the inline-constant form when possible and truncating/converting
// to the operand's width otherwise. ApplyModifiers folds parsed abs/neg FP
// modifiers into the bit pattern first.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // FP tokens are stored as 64-bit doubles regardless of operand width.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of the double are encoded.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    if (isSafeTruncation(Val, 32) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: encode the low 32 bits as a literal.
    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    if (isSafeTruncation(Val, 16) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    // Packed operands only accept inline constants; predicate methods
    // must have rejected anything else before we get here.
    assert(isSafeTruncation(Val, 16));
    assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                        AsmParser->hasInv2PiInlineImm()));

    Inst.addOperand(MCOperand::createImm(Val));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1599
// Adds a KImm operand (a literal of Bitwidth bits encoded directly in the
// instruction). Integer tokens are truncated to Bitwidth; FP tokens are
// converted to the FP format of that width.
template <unsigned Bitwidth>
void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
  APInt Literal(64, Imm.Val);

  if (!Imm.IsFPImm) {
    // We got int literal token.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
    return;
  }

  bool Lost;
  APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
  FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
                    APFloat::rmNearestTiesToEven, &Lost);
  Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
}
1616
// Adds this register operand to Inst, translating to the subtarget's MC
// register number.
void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
  Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
}
1620
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001621static bool isInlineValue(unsigned Reg) {
1622 switch (Reg) {
1623 case AMDGPU::SRC_SHARED_BASE:
1624 case AMDGPU::SRC_SHARED_LIMIT:
1625 case AMDGPU::SRC_PRIVATE_BASE:
1626 case AMDGPU::SRC_PRIVATE_LIMIT:
1627 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
1628 return true;
1629 default:
1630 return false;
1631 }
1632}
1633
// Returns true if this operand is one of the named inline-value registers.
bool AMDGPUOperand::isInlineValue() const {
  return isRegKind() && ::isInlineValue(getReg());
}
1637
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001638//===----------------------------------------------------------------------===//
1639// AsmParser
1640//===----------------------------------------------------------------------===//
1641
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001642static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1643 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001644 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001645 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001646 case 1: return AMDGPU::VGPR_32RegClassID;
1647 case 2: return AMDGPU::VReg_64RegClassID;
1648 case 3: return AMDGPU::VReg_96RegClassID;
1649 case 4: return AMDGPU::VReg_128RegClassID;
1650 case 8: return AMDGPU::VReg_256RegClassID;
1651 case 16: return AMDGPU::VReg_512RegClassID;
1652 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001653 } else if (Is == IS_TTMP) {
1654 switch (RegWidth) {
1655 default: return -1;
1656 case 1: return AMDGPU::TTMP_32RegClassID;
1657 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001658 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001659 case 8: return AMDGPU::TTMP_256RegClassID;
1660 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001661 }
1662 } else if (Is == IS_SGPR) {
1663 switch (RegWidth) {
1664 default: return -1;
1665 case 1: return AMDGPU::SGPR_32RegClassID;
1666 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001667 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001668 case 8: return AMDGPU::SGPR_256RegClassID;
1669 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001670 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001671 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001672 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001673}
1674
// Maps the textual name of a special (non-generic) register to its MC
// register number; returns 0 for names that are not special registers.
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("xnack_mask", AMDGPU::XNACK_MASK)
    .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("lds_direct", AMDGPU::LDS_DIRECT)
    .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
    .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Case("null", AMDGPU::SGPR_NULL)
    .Default(0);
}
1712
// MCTargetAsmParser hook: parses a register at the current position.
// Returns true on FAILURE (MC parser convention); on success fills RegNo
// and the source range of the register token(s).
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  auto R = parseRegister();
  if (!R) return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}
1723
Eugene Zelenko66203762017-01-21 00:53:49 +00001724bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1725 RegisterKind RegKind, unsigned Reg1,
1726 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001727 switch (RegKind) {
1728 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001729 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1730 Reg = AMDGPU::EXEC;
1731 RegWidth = 2;
1732 return true;
1733 }
1734 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1735 Reg = AMDGPU::FLAT_SCR;
1736 RegWidth = 2;
1737 return true;
1738 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001739 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1740 Reg = AMDGPU::XNACK_MASK;
1741 RegWidth = 2;
1742 return true;
1743 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001744 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1745 Reg = AMDGPU::VCC;
1746 RegWidth = 2;
1747 return true;
1748 }
1749 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1750 Reg = AMDGPU::TBA;
1751 RegWidth = 2;
1752 return true;
1753 }
1754 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1755 Reg = AMDGPU::TMA;
1756 RegWidth = 2;
1757 return true;
1758 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001759 return false;
1760 case IS_VGPR:
1761 case IS_SGPR:
1762 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001763 if (Reg1 != Reg + RegWidth) {
1764 return false;
1765 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001766 RegWidth++;
1767 return true;
1768 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001769 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001770 }
1771}
1772
// Prefixes of the generic register names recognized by the parser
// (vector, scalar and trap-temporary registers).
static const StringRef Registers[] = {
  { "v" },
  { "s" },
  { "ttmp" },
};
1778
1779bool
1780AMDGPUAsmParser::isRegister(const AsmToken &Token,
1781 const AsmToken &NextToken) const {
1782
1783 // A list of consecutive registers: [s0,s1,s2,s3]
1784 if (Token.is(AsmToken::LBrac))
1785 return true;
1786
1787 if (!Token.is(AsmToken::Identifier))
1788 return false;
1789
1790 // A single register like s0 or a range of registers like s[0:1]
1791
1792 StringRef RegName = Token.getString();
1793
1794 for (StringRef Reg : Registers) {
1795 if (RegName.startswith(Reg)) {
1796 if (Reg.size() < RegName.size()) {
1797 unsigned RegNum;
1798 // A single register with an index: rXX
1799 if (!RegName.substr(Reg.size()).getAsInteger(10, RegNum))
1800 return true;
1801 } else {
1802 // A range of registers: r[XX:YY].
1803 if (NextToken.is(AsmToken::LBrac))
1804 return true;
1805 }
1806 }
1807 }
1808
1809 return getSpecialRegForName(RegName);
1810}
1811
// Returns true if the token at the current position starts a register;
// uses one token of lookahead for the "name[" range form.
bool
AMDGPUAsmParser::isRegister()
{
  return isRegister(getToken(), peekToken());
}
1817
// Parses one register reference at the current lexer position: a special
// register name, a generic register ("v0", "s[0:3]", "ttmp2") or a
// bracketed list of consecutive single registers ("[s0,s1,s2]", parsed by
// recursing with DwordRegIndex == nullptr). On success fills RegKind, Reg
// (the resolved MC register), RegNum, RegWidth (in dwords) and, if
// requested, DwordRegIndex (the raw first-dword index before alignment
// scaling). Returns false on any syntax or validity error.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Generic register: determine the kind from the name prefix.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          // v[XX] is a width-1 range.
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve (kind, first index, width) to an actual MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1946
Scott Linder1e8c2c72018-06-21 19:38:56 +00001947Optional<StringRef>
1948AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1949 switch (RegKind) {
1950 case IS_VGPR:
1951 return StringRef(".amdgcn.next_free_vgpr");
1952 case IS_SGPR:
1953 return StringRef(".amdgcn.next_free_sgpr");
1954 default:
1955 return None;
1956 }
1957}
1958
1959void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1960 auto SymbolName = getGprCountSymbolName(RegKind);
1961 assert(SymbolName && "initializing invalid register kind");
1962 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1963 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1964}
1965
1966bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
1967 unsigned DwordRegIndex,
1968 unsigned RegWidth) {
1969 // Symbols are only defined for GCN targets
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00001970 if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
Scott Linder1e8c2c72018-06-21 19:38:56 +00001971 return true;
1972
1973 auto SymbolName = getGprCountSymbolName(RegKind);
1974 if (!SymbolName)
1975 return true;
1976 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1977
1978 int64_t NewMax = DwordRegIndex + RegWidth - 1;
1979 int64_t OldCount;
1980
1981 if (!Sym->isVariable())
1982 return !Error(getParser().getTok().getLoc(),
1983 ".amdgcn.next_free_{v,s}gpr symbols must be variable");
1984 if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
1985 return !Error(
1986 getParser().getTok().getLoc(),
1987 ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");
1988
1989 if (OldCount <= NewMax)
1990 Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));
1991
1992 return true;
1993}
1994
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001995std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001996 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001997 SMLoc StartLoc = Tok.getLoc();
1998 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001999 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00002000 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002001
Artem Tamazova01cce82016-12-27 16:00:11 +00002002 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002003 //FIXME: improve error messages (bug 41303).
2004 Error(StartLoc, "not a valid operand.");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00002005 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002006 }
Scott Linder1e8c2c72018-06-21 19:38:56 +00002007 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
2008 if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
2009 return nullptr;
2010 } else
2011 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002012 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002013}
2014
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002015bool
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002016AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool HasSP3AbsModifier) {
2017 if (HasSP3AbsModifier) {
2018 // This is a workaround for handling expressions
2019 // as arguments of SP3 'abs' modifier, for example:
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002020 // |1.0|
2021 // |-1|
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002022 // |1+x|
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002023 // This syntax is not compatible with syntax of standard
2024 // MC expressions (due to the trailing '|').
2025
2026 SMLoc EndLoc;
2027 const MCExpr *Expr;
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002028 SMLoc StartLoc = getLoc();
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002029
2030 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
2031 return true;
2032 }
2033
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00002034 if (!Expr->evaluateAsAbsolute(Val))
2035 return Error(StartLoc, "expected absolute expression");
2036
2037 return false;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002038 }
2039
2040 return getParser().parseAbsoluteExpression(Val);
2041}
2042
// Parse a bare immediate operand: either a (possibly '-'-prefixed)
// floating-point literal or an absolute integer expression.
// Returns NoMatch when the token stream does not start an immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool HasSP3AbsModifier) {
  // TODO: add syntactic sugar for 1/(2*PI)

  const auto& Tok = getToken();
  const auto& NextTok = peekToken();
  bool IsReal = Tok.is(AsmToken::Real);
  SMLoc S = Tok.getLoc();
  bool Negate = false;

  // A '-' immediately followed by an FP literal is treated as a negated
  // literal (the lexer delivers the sign as a separate token).
  if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
    lex();
    IsReal = true;
    Negate = true;
  }

  if (IsReal) {
    // Floating-point expressions are not supported.
    // Can only allow floating-point literals with an
    // optional sign.

    StringRef Num = getTokenStr();
    lex();

    APFloat RealVal(APFloat::IEEEdouble());
    auto roundMode = APFloat::rmNearestTiesToEven;
    if (RealVal.convertFromString(Num, roundMode) == APFloat::opInvalidOp) {
      return MatchOperand_ParseFail;
    }
    if (Negate)
      RealVal.changeSign();

    // The immediate is stored as the raw bit pattern of an IEEE double.
    // NOTE(review): the trailing 'true' flag presumably marks this as an
    // FP literal — confirm against AMDGPUOperand::CreateImm.
    Operands.push_back(
      AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
                               AMDGPUOperand::ImmTyNone, true));

    return MatchOperand_Success;

  // FIXME: Should enable arbitrary expressions here
  } else if (Tok.is(AsmToken::Integer) ||
             (Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Integer))){

    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, HasSP3AbsModifier))
      return MatchOperand_ParseFail;

    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2095
Alex Bradbury58eba092016-11-01 16:32:05 +00002096OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00002097AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002098 if (!isRegister())
2099 return MatchOperand_NoMatch;
2100
Sam Kolton1bdcef72016-05-23 09:59:02 +00002101 if (auto R = parseRegister()) {
2102 assert(R->isReg());
Sam Kolton1bdcef72016-05-23 09:59:02 +00002103 Operands.push_back(std::move(R));
2104 return MatchOperand_Success;
2105 }
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002106 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002107}
2108
Alex Bradbury58eba092016-11-01 16:32:05 +00002109OperandMatchResultTy
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002110AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002111 auto res = parseReg(Operands);
2112 return (res == MatchOperand_NoMatch)?
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002113 parseImm(Operands, HasSP3AbsMod) :
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002114 res;
Sam Kolton9772eb32017-01-11 11:46:30 +00002115}
2116
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002117// Check if the current token is an SP3 'neg' modifier.
2118// Currently this modifier is allowed in the following context:
2119//
2120// 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]".
2121// 2. Before an 'abs' modifier: -abs(...)
2122// 3. Before an SP3 'abs' modifier: -|...|
2123//
2124// In all other cases "-" is handled as a part
2125// of an expression that follows the sign.
2126//
2127// Note: When "-" is followed by an integer literal,
2128// this is interpreted as integer negation rather
2129// than a floating-point NEG modifier applied to N.
2130// Beside being contr-intuitive, such use of floating-point
2131// NEG modifier would have resulted in different meaning
2132// of integer literals used with VOP1/2/C and VOP3,
2133// for example:
2134// v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
2135// v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
2136// Negative fp literals with preceding "-" are
2137// handled likewise for unifomtity
2138//
2139bool
2140AMDGPUAsmParser::parseSP3NegModifier() {
2141
2142 AsmToken NextToken[2];
2143 peekTokens(NextToken);
2144
2145 if (isToken(AsmToken::Minus) &&
2146 (isRegister(NextToken[0], NextToken[1]) ||
2147 NextToken[0].is(AsmToken::Pipe) ||
2148 isId(NextToken[0], "abs"))) {
2149 lex();
2150 return true;
2151 }
2152
2153 return false;
2154}
2155
// Parse a register or immediate operand with optional floating-point
// input modifiers: textual neg(...)/abs(...) and SP3-style "-" / "|...|".
// A textual modifier and its SP3 equivalent are mutually exclusive.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Neg, SP3Neg;
  bool Abs, SP3Abs;
  SMLoc Loc;

  // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
  if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) {
    Error(getLoc(), "invalid syntax, expected 'neg' modifier");
    return MatchOperand_ParseFail;
  }

  SP3Neg = parseSP3NegModifier();

  Loc = getLoc();
  Neg = trySkipId("neg");
  // "neg(" cannot follow an SP3 "-" on the same operand.
  if (Neg && SP3Neg) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }
  if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg"))
    return MatchOperand_ParseFail;

  Abs = trySkipId("abs");
  if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs"))
    return MatchOperand_ParseFail;

  Loc = getLoc();
  SP3Abs = trySkipToken(AsmToken::Pipe);
  // Likewise, "abs(" and SP3 "|...|" are mutually exclusive.
  if (Abs && SP3Abs) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    // An enclosing SP3 "|...|" changes how the inner expression is parsed.
    Res = parseRegOrImm(Operands, SP3Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    // Once any modifier has been consumed, failing to parse the operand
    // is a hard error rather than a no-match.
    return (SP3Neg || Neg || SP3Abs || Abs)? MatchOperand_ParseFail : Res;
  }

  if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar"))
    return MatchOperand_ParseFail;
  if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;
  if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;

  AMDGPUOperand::Modifiers Mods;
  Mods.Abs = Abs || SP3Abs;
  Mods.Neg = Neg || SP3Neg;

  // Only attach modifiers when at least one was actually present.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
2218
Alex Bradbury58eba092016-11-01 16:32:05 +00002219OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00002220AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
2221 bool AllowImm) {
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002222 bool Sext = trySkipId("sext");
2223 if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext"))
2224 return MatchOperand_ParseFail;
Sam Kolton945231a2016-06-10 09:57:59 +00002225
Sam Kolton9772eb32017-01-11 11:46:30 +00002226 OperandMatchResultTy Res;
2227 if (AllowImm) {
2228 Res = parseRegOrImm(Operands);
2229 } else {
2230 Res = parseReg(Operands);
2231 }
Sam Kolton945231a2016-06-10 09:57:59 +00002232 if (Res != MatchOperand_Success) {
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00002233 return Sext? MatchOperand_ParseFail : Res;
Sam Kolton945231a2016-06-10 09:57:59 +00002234 }
2235
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002236 if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses"))
2237 return MatchOperand_ParseFail;
2238
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002239 AMDGPUOperand::Modifiers Mods;
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00002240 Mods.Sext = Sext;
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00002241
Sam Kolton945231a2016-06-10 09:57:59 +00002242 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00002243 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00002244 Op.setModifiers(Mods);
2245 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002246
Sam Kolton945231a2016-06-10 09:57:59 +00002247 return MatchOperand_Success;
2248}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002249
// Parse a register operand with optional FP input modifiers
// (neg/abs and their SP3 forms); immediates are not allowed here.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, false);
}
2254
// Parse a register operand with an optional integer 'sext(...)'
// modifier; immediates are not allowed here.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
2259
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002260OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002261 auto Loc = getLoc();
2262 if (trySkipId("off")) {
2263 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc,
2264 AMDGPUOperand::ImmTyOff, false));
2265 return MatchOperand_Success;
2266 }
2267
2268 if (!isRegister())
2269 return MatchOperand_NoMatch;
2270
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002271 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2272 if (Reg) {
2273 Operands.push_back(std::move(Reg));
2274 return MatchOperand_Success;
2275 }
2276
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002277 return MatchOperand_ParseFail;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002278
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002279}
2280
// Reject a matched instruction when it conflicts with the encoding the
// user forced via a mnemonic suffix (_e32/_e64/_sdwa/_dpp), and apply
// target-specific operand restrictions not expressible in TableGen.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // A forced 32-bit encoding must not match a VOP3 opcode (and vice
  // versa); likewise for forced DPP and SDWA encodings.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some VOP3 opcodes prefer their 32-bit form unless e64 was forced.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  // FLAT instructions accept a nonzero offset only on subtargets that
  // support flat offsets.
  if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
    // FIXME: Produces error without correct column reported.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
    const auto &Op = Inst.getOperand(OpNum);
    if (Op.getImm() != 0)
      return Match_InvalidOperand;
  }

  return Match_Success;
}
2317
Matt Arsenault5f45e782017-01-09 18:44:11 +00002318// What asm variants we should check
2319ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2320 if (getForcedEncodingSize() == 32) {
2321 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2322 return makeArrayRef(Variants);
2323 }
2324
2325 if (isForcedVOP3()) {
2326 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2327 return makeArrayRef(Variants);
2328 }
2329
2330 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002331 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2332 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002333 return makeArrayRef(Variants);
2334 }
2335
2336 if (isForcedDPP()) {
2337 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2338 return makeArrayRef(Variants);
2339 }
2340
2341 static const unsigned Variants[] = {
2342 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002343 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002344 };
2345
2346 return makeArrayRef(Variants);
2347}
2348
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002349unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2350 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2351 const unsigned Num = Desc.getNumImplicitUses();
2352 for (unsigned i = 0; i < Num; ++i) {
2353 unsigned Reg = Desc.ImplicitUses[i];
2354 switch (Reg) {
2355 case AMDGPU::FLAT_SCR:
2356 case AMDGPU::VCC:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00002357 case AMDGPU::VCC_LO:
2358 case AMDGPU::VCC_HI:
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002359 case AMDGPU::M0:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00002360 case AMDGPU::SGPR_NULL:
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002361 return Reg;
2362 default:
2363 break;
2364 }
2365 }
2366 return AMDGPU::NoRegister;
2367}
2368
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 support no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
//
// Returns true when the immediate in operand OpIdx fits the inline
// constant encoding for that operand's expected size.
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  // Only SI source operands can encode inline constants.
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    // Packed 16-bit (v2i16/v2f16) operands have their own inline rules.
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
2404
2405bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2406 const MCOperand &MO = Inst.getOperand(OpIdx);
2407 if (MO.isImm()) {
2408 return !isInlineConstant(Inst, OpIdx);
2409 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002410 return !MO.isReg() ||
2411 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002412}
2413
// Verify the instruction reads at most one value over the constant bus.
// SGPRs, literals and expressions all share the bus; implicitly read
// special SGPRs count too, and repeated reads of the same SGPR count once.
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VALU encodings are subject to this restriction.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // Implicit reads of VCC/M0/etc. occupy the bus as well.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersections like these
          // s0, s[0:1]
          // flat_scratch_lo, flat_scratch
          // flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2466
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002467bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002468 const unsigned Opcode = Inst.getOpcode();
2469 const MCInstrDesc &Desc = MII.get(Opcode);
2470
2471 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2472 if (DstIdx == -1 ||
2473 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2474 return true;
2475 }
2476
2477 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2478
2479 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2480 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2481 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2482
2483 assert(DstIdx != -1);
2484 const MCOperand &Dst = Inst.getOperand(DstIdx);
2485 assert(Dst.isReg());
2486 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2487
2488 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2489
2490 for (int SrcIdx : SrcIndices) {
2491 if (SrcIdx == -1) break;
2492 const MCOperand &Src = Inst.getOperand(SrcIdx);
2493 if (Src.isReg()) {
2494 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2495 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2496 return false;
2497 }
2498 }
2499 }
2500
2501 return true;
2502}
2503
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002504bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2505
2506 const unsigned Opc = Inst.getOpcode();
2507 const MCInstrDesc &Desc = MII.get(Opc);
2508
2509 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2510 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2511 assert(ClampIdx != -1);
2512 return Inst.getOperand(ClampIdx).getImm() == 0;
2513 }
2514
2515 return true;
2516}
2517
// Check that the width of a MIMG vdata register tuple matches the number
// of channels enabled by dmask, plus one extra dword when TFE is set.
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  // Register operand size; divided by 4 below, so presumably in bytes —
  // confirm against AMDGPU::getRegOperandSize.
  unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
  // TFE adds one extra dword to the returned data.
  unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
  // A zero dmask is treated as a single enabled channel.
  if (DMask == 0)
    DMask = 1;

  // Gather4 always returns four channels regardless of dmask.
  unsigned DataSize =
    (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
  if (hasPackedD16()) {
    // With packed D16, two 16-bit channels share one dword (round up).
    int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
    if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
      DataSize = (DataSize + 1) / 2;
  }

  return (VDataSize / 4) == DataSize + TFESize;
}
2550
2551bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2552
2553 const unsigned Opc = Inst.getOpcode();
2554 const MCInstrDesc &Desc = MII.get(Opc);
2555
2556 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2557 return true;
2558 if (!Desc.mayLoad() || !Desc.mayStore())
2559 return true; // Not atomic
2560
2561 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2562 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2563
2564 // This is an incomplete check because image_atomic_cmpswap
2565 // may only use 0x3 and 0xf while other atomic operations
2566 // may use 0x1 and 0x3. However these limitations are
2567 // verified when we check that dmask matches dst size.
2568 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2569}
2570
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002571bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2572
2573 const unsigned Opc = Inst.getOpcode();
2574 const MCInstrDesc &Desc = MII.get(Opc);
2575
2576 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2577 return true;
2578
2579 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2580 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2581
2582 // GATHER4 instructions use dmask in a different fashion compared to
2583 // other MIMG instructions. The only useful DMASK values are
2584 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2585 // (red,red,red,red) etc.) The ISA document doesn't mention
2586 // this.
2587 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2588}
2589
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002590bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2591
2592 const unsigned Opc = Inst.getOpcode();
2593 const MCInstrDesc &Desc = MII.get(Opc);
2594
2595 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2596 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002597
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002598 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2599 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2600 if (isCI() || isSI())
2601 return false;
2602 }
2603
2604 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002605}
2606
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002607static bool IsRevOpcode(const unsigned Opcode)
2608{
2609 switch (Opcode) {
2610 case AMDGPU::V_SUBREV_F32_e32:
2611 case AMDGPU::V_SUBREV_F32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002612 case AMDGPU::V_SUBREV_F32_e32_gfx10:
2613 case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002614 case AMDGPU::V_SUBREV_F32_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002615 case AMDGPU::V_SUBREV_F32_e64_gfx10:
2616 case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002617 case AMDGPU::V_SUBREV_F32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002618
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002619 case AMDGPU::V_SUBREV_I32_e32:
2620 case AMDGPU::V_SUBREV_I32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002621 case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7:
2622 case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7:
2623
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002624 case AMDGPU::V_SUBBREV_U32_e32:
2625 case AMDGPU::V_SUBBREV_U32_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002626 case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002627 case AMDGPU::V_SUBBREV_U32_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002628 case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002629 case AMDGPU::V_SUBBREV_U32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002630
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002631 case AMDGPU::V_SUBREV_U32_e32:
2632 case AMDGPU::V_SUBREV_U32_e64:
2633 case AMDGPU::V_SUBREV_U32_e32_gfx9:
2634 case AMDGPU::V_SUBREV_U32_e32_vi:
2635 case AMDGPU::V_SUBREV_U32_e64_gfx9:
2636 case AMDGPU::V_SUBREV_U32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002637
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002638 case AMDGPU::V_SUBREV_F16_e32:
2639 case AMDGPU::V_SUBREV_F16_e64:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002640 case AMDGPU::V_SUBREV_F16_e32_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002641 case AMDGPU::V_SUBREV_F16_e32_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002642 case AMDGPU::V_SUBREV_F16_e64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002643 case AMDGPU::V_SUBREV_F16_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002644
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002645 case AMDGPU::V_SUBREV_U16_e32:
2646 case AMDGPU::V_SUBREV_U16_e64:
2647 case AMDGPU::V_SUBREV_U16_e32_vi:
2648 case AMDGPU::V_SUBREV_U16_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002649
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002650 case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002651 case AMDGPU::V_SUBREV_CO_U32_e64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002652 case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002653
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002654 case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
2655 case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002656
2657 case AMDGPU::V_SUBREV_NC_U32_e32_gfx10:
2658 case AMDGPU::V_SUBREV_NC_U32_e64_gfx10:
2659
2660 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
2661 case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10:
2662
2663 case AMDGPU::V_LSHRREV_B32_e32:
2664 case AMDGPU::V_LSHRREV_B32_e64:
2665 case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7:
2666 case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002667 case AMDGPU::V_LSHRREV_B32_e32_vi:
2668 case AMDGPU::V_LSHRREV_B32_e64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002669 case AMDGPU::V_LSHRREV_B32_e32_gfx10:
2670 case AMDGPU::V_LSHRREV_B32_e64_gfx10:
2671
2672 case AMDGPU::V_ASHRREV_I32_e32:
2673 case AMDGPU::V_ASHRREV_I32_e64:
2674 case AMDGPU::V_ASHRREV_I32_e32_gfx10:
2675 case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7:
2676 case AMDGPU::V_ASHRREV_I32_e32_vi:
2677 case AMDGPU::V_ASHRREV_I32_e64_gfx10:
2678 case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7:
2679 case AMDGPU::V_ASHRREV_I32_e64_vi:
2680
2681 case AMDGPU::V_LSHLREV_B32_e32:
2682 case AMDGPU::V_LSHLREV_B32_e64:
2683 case AMDGPU::V_LSHLREV_B32_e32_gfx10:
2684 case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7:
2685 case AMDGPU::V_LSHLREV_B32_e32_vi:
2686 case AMDGPU::V_LSHLREV_B32_e64_gfx10:
2687 case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7:
2688 case AMDGPU::V_LSHLREV_B32_e64_vi:
2689
2690 case AMDGPU::V_LSHLREV_B16_e32:
2691 case AMDGPU::V_LSHLREV_B16_e64:
2692 case AMDGPU::V_LSHLREV_B16_e32_vi:
2693 case AMDGPU::V_LSHLREV_B16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002694 case AMDGPU::V_LSHLREV_B16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002695
2696 case AMDGPU::V_LSHRREV_B16_e32:
2697 case AMDGPU::V_LSHRREV_B16_e64:
2698 case AMDGPU::V_LSHRREV_B16_e32_vi:
2699 case AMDGPU::V_LSHRREV_B16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002700 case AMDGPU::V_LSHRREV_B16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002701
2702 case AMDGPU::V_ASHRREV_I16_e32:
2703 case AMDGPU::V_ASHRREV_I16_e64:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002704 case AMDGPU::V_ASHRREV_I16_e32_vi:
2705 case AMDGPU::V_ASHRREV_I16_e64_vi:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002706 case AMDGPU::V_ASHRREV_I16_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002707
2708 case AMDGPU::V_LSHLREV_B64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002709 case AMDGPU::V_LSHLREV_B64_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002710 case AMDGPU::V_LSHLREV_B64_vi:
2711
2712 case AMDGPU::V_LSHRREV_B64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002713 case AMDGPU::V_LSHRREV_B64_gfx10:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002714 case AMDGPU::V_LSHRREV_B64_vi:
2715
2716 case AMDGPU::V_ASHRREV_I64:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002717 case AMDGPU::V_ASHRREV_I64_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002718 case AMDGPU::V_ASHRREV_I64_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002719
2720 case AMDGPU::V_PK_LSHLREV_B16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002721 case AMDGPU::V_PK_LSHLREV_B16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002722 case AMDGPU::V_PK_LSHLREV_B16_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002723
2724 case AMDGPU::V_PK_LSHRREV_B16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002725 case AMDGPU::V_PK_LSHRREV_B16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002726 case AMDGPU::V_PK_LSHRREV_B16_vi:
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00002727 case AMDGPU::V_PK_ASHRREV_I16:
Stanislav Mekhanoshin61beff02019-04-26 17:56:03 +00002728 case AMDGPU::V_PK_ASHRREV_I16_gfx10:
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002729 case AMDGPU::V_PK_ASHRREV_I16_vi:
2730 return true;
2731 default:
2732 return false;
2733 }
2734}
2735
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002736bool AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
2737
2738 using namespace SIInstrFlags;
2739 const unsigned Opcode = Inst.getOpcode();
2740 const MCInstrDesc &Desc = MII.get(Opcode);
2741
2742 // lds_direct register is defined so that it can be used
2743 // with 9-bit operands only. Ignore encodings which do not accept these.
2744 if ((Desc.TSFlags & (VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA)) == 0)
2745 return true;
2746
2747 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2748 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2749 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2750
2751 const int SrcIndices[] = { Src1Idx, Src2Idx };
2752
2753 // lds_direct cannot be specified as either src1 or src2.
2754 for (int SrcIdx : SrcIndices) {
2755 if (SrcIdx == -1) break;
2756 const MCOperand &Src = Inst.getOperand(SrcIdx);
2757 if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
2758 return false;
2759 }
2760 }
2761
2762 if (Src0Idx == -1)
2763 return true;
2764
2765 const MCOperand &Src = Inst.getOperand(Src0Idx);
2766 if (!Src.isReg() || Src.getReg() != LDS_DIRECT)
2767 return true;
2768
2769 // lds_direct is specified as src0. Check additional limitations.
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002770 return (Desc.TSFlags & SIInstrFlags::SDWA) == 0 && !IsRevOpcode(Opcode);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002771}
2772
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00002773bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
2774 unsigned Opcode = Inst.getOpcode();
2775 const MCInstrDesc &Desc = MII.get(Opcode);
2776 if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
2777 return true;
2778
2779 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2780 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2781
2782 const int OpIndices[] = { Src0Idx, Src1Idx };
2783
2784 unsigned NumLiterals = 0;
2785 uint32_t LiteralValue;
2786
2787 for (int OpIdx : OpIndices) {
2788 if (OpIdx == -1) break;
2789
2790 const MCOperand &MO = Inst.getOperand(OpIdx);
2791 if (MO.isImm() &&
2792 // Exclude special imm operands (like that used by s_set_gpr_idx_on)
2793 AMDGPU::isSISrcOperand(Desc, OpIdx) &&
2794 !isInlineConstant(Inst, OpIdx)) {
2795 uint32_t Value = static_cast<uint32_t>(MO.getImm());
2796 if (NumLiterals == 0 || LiteralValue != Value) {
2797 LiteralValue = Value;
2798 ++NumLiterals;
2799 }
2800 }
2801 }
2802
2803 return NumLiterals <= 1;
2804}
2805
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002806bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2807 const SMLoc &IDLoc) {
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002808 if (!validateLdsDirect(Inst)) {
2809 Error(IDLoc,
2810 "invalid use of lds_direct");
2811 return false;
2812 }
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00002813 if (!validateSOPLiteral(Inst)) {
2814 Error(IDLoc,
2815 "only one literal operand is allowed");
2816 return false;
2817 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002818 if (!validateConstantBusLimitations(Inst)) {
2819 Error(IDLoc,
2820 "invalid operand (violates constant bus restrictions)");
2821 return false;
2822 }
2823 if (!validateEarlyClobberLimitations(Inst)) {
2824 Error(IDLoc,
2825 "destination must be different than all sources");
2826 return false;
2827 }
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002828 if (!validateIntClampSupported(Inst)) {
2829 Error(IDLoc,
2830 "integer clamping is not supported on this GPU");
2831 return false;
2832 }
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002833 // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
2834 if (!validateMIMGD16(Inst)) {
2835 Error(IDLoc,
2836 "d16 modifier is not supported on this GPU");
2837 return false;
2838 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002839 if (!validateMIMGDataSize(Inst)) {
2840 Error(IDLoc,
2841 "image data size does not match dmask and tfe");
2842 return false;
2843 }
2844 if (!validateMIMGAtomicDMask(Inst)) {
2845 Error(IDLoc,
2846 "invalid atomic image dmask");
2847 return false;
2848 }
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002849 if (!validateMIMGGatherDMask(Inst)) {
2850 Error(IDLoc,
2851 "invalid image_gather dmask: only one bit must be set");
2852 return false;
2853 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002854
2855 return true;
2856}
2857
Stanislav Mekhanoshine98944e2019-03-11 17:04:35 +00002858static std::string AMDGPUMnemonicSpellCheck(StringRef S,
2859 const FeatureBitset &FBS,
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00002860 unsigned VariantID = 0);
2861
// Matches the parsed operands against every allowed encoding variant and
// either emits the selected MCInst or produces the most specific diagnostic.
// Note: the Opcode out-parameter is part of the generic MCTargetAsmParser
// interface and is not written by this implementation.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // Stop at the first variant that matches successfully.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // A syntactic match still has to pass target-specific validation
    // (lds_direct, literal, constant-bus, MIMG checks, ...).
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    // Offer a spelling suggestion based on the mnemonics available for the
    // current feature set.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ErrorInfo, when valid, is the index of the offending operand.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2931
Artem Tamazov25478d82016-12-29 15:41:52 +00002932bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2933 int64_t Tmp = -1;
2934 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2935 return true;
2936 }
2937 if (getParser().parseAbsoluteExpression(Tmp)) {
2938 return true;
2939 }
2940 Ret = static_cast<uint32_t>(Tmp);
2941 return false;
2942}
2943
// Parses a "<major>, <minor>" version pair shared by several HSA directives.
// Returns true (after emitting a diagnostic) on failure.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex(); // Consume the comma separating the two numbers.

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
2958
Scott Linder1e8c2c72018-06-21 19:38:56 +00002959bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
2960 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2961 return TokError("directive only supported for amdgcn architecture");
2962
2963 std::string Target;
2964
2965 SMLoc TargetStart = getTok().getLoc();
2966 if (getParser().parseEscapedString(Target))
2967 return true;
2968 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
2969
2970 std::string ExpectedTarget;
2971 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
2972 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
2973
2974 if (Target != ExpectedTargetOS.str())
2975 return getParser().Error(TargetRange.Start, "target must match options",
2976 TargetRange);
2977
2978 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
2979 return false;
2980}
2981
// Emits the generic "value out of range" diagnostic covering Range.
// Always returns true, matching the parser's error-return convention.
bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return getParser().Error(Range.Start, "value out of range", Range);
}
2985
// Converts the highest-used VGPR/SGPR numbers into the granulated block
// counts stored in the kernel descriptor. Returns true (after emitting an
// out-of-range diagnostic) if the SGPR count exceeds the addressable limit.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
    unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
    unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;

  // On gfx10+ the SGPR block count is computed as if no SGPRs were used.
  if (Version.Major >= 10)
    NumSGPRs = 0;
  else {
    unsigned MaxAddressableNumSGPRs =
        IsaInfo::getAddressableNumSGPRs(&getSTI());

    // gfx8+ without the SGPR-init bug: the user-specified count alone must
    // already be addressable (extra SGPRs are appended below).
    if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    // Account for implicitly-used registers (VCC, flat scratch, XNACK mask).
    NumSGPRs +=
        IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

    // gfx7 and earlier (or targets with the SGPR-init bug) check the total
    // including the extra SGPRs.
    if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    // Targets with the SGPR-init bug always report a fixed SGPR count.
    if (Features.test(FeatureSGPRInitBug))
      NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  VGPRBlocks = IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
3024
// Parses a .amdhsa_kernel block: one "<.amdhsa_* directive> <value>" pair per
// statement until .end_amdhsa_kernel. Accumulates the values into a kernel
// descriptor and emits it through the target streamer. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
    return TokError("directive only supported for amdhsa OS");

  StringRef KernelName;
  if (getParser().parseIdentifier(KernelName))
    return true;

  // Start from the target's default descriptor; directives override fields.
  kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI());

  // Tracks which directives have appeared, both to reject duplicates and to
  // enforce the required .amdhsa_next_free_{v,s}gpr after the loop.
  StringSet<> Seen;

  IsaVersion IVersion = getIsaVersion(getSTI().getCPU());

  SMRange VGPRRange;
  uint64_t NextFreeVGPR = 0;
  SMRange SGPRRange;
  uint64_t NextFreeSGPR = 0;
  unsigned UserSGPRCount = 0;
  bool ReserveVCC = true;
  bool ReserveFlatScr = true;
  bool ReserveXNACK = hasXNACK();

  // Main loop: one directive/value pair per iteration.
  while (true) {
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");

    StringRef ID = getTok().getIdentifier();
    SMRange IDRange = getTok().getLocRange();
    Lex();

    if (ID == ".end_amdhsa_kernel")
      break;

    // Each directive may appear at most once.
    if (Seen.find(ID) != Seen.end())
      return TokError(".amdhsa_ directives cannot be repeated");
    Seen.insert(ID);

    SMLoc ValStart = getTok().getLoc();
    int64_t IVal;
    if (getParser().parseAbsoluteExpression(IVal))
      return true;
    SMLoc ValEnd = getTok().getLoc();
    SMRange ValRange = SMRange(ValStart, ValEnd);

    // All descriptor fields are unsigned; negative values never fit.
    if (IVal < 0)
      return OutOfRangeError(ValRange);

    uint64_t Val = IVal;

// Range-checks VALUE against the bit-field's declared width, then stores it
// into the descriptor word. Expanded inside braced if/else-if bodies only.
#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE)                           \
  if (!isUInt<ENTRY##_WIDTH>(VALUE))                                           \
    return OutOfRangeError(RANGE);                                             \
  AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);

    if (ID == ".amdhsa_group_segment_fixed_size") {
      if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.group_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_private_segment_fixed_size") {
      if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.private_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
                       Val, ValRange);
      UserSGPRCount += 4;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
                       Val, ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
                       Val, ValRange);
      UserSGPRCount += 1;
    } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
          ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
                       ValRange);
    } else if (ID == ".amdhsa_next_free_vgpr") {
      VGPRRange = ValRange;
      NextFreeVGPR = Val;
    } else if (ID == ".amdhsa_next_free_sgpr") {
      SGPRRange = ValRange;
      NextFreeSGPR = Val;
    } else if (ID == ".amdhsa_reserve_vcc") {
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveVCC = Val;
    } else if (ID == ".amdhsa_reserve_flat_scratch") {
      if (IVersion.Major < 7)
        return getParser().Error(IDRange.Start, "directive requires gfx7+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveFlatScr = Val;
    } else if (ID == ".amdhsa_reserve_xnack_mask") {
      if (IVersion.Major < 8)
        return getParser().Error(IDRange.Start, "directive requires gfx8+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveXNACK = Val;
    } else if (ID == ".amdhsa_float_round_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_round_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
                       ValRange);
    } else if (ID == ".amdhsa_dx10_clamp") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
    } else if (ID == ".amdhsa_ieee_mode") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_fp16_overflow") {
      if (IVersion.Major < 9)
        return getParser().Error(IDRange.Start, "directive requires gfx9+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
                       ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_denorm_src") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_int_div_zero") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
                       Val, ValRange);
    } else {
      return getParser().Error(IDRange.Start,
                               "unknown .amdhsa_kernel directive", IDRange);
    }

#undef PARSE_BITS_ENTRY
  }

  // The register-count directives are mandatory: the descriptor's granulated
  // GPR fields cannot be computed without them.
  if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
    return TokError(".amdhsa_next_free_vgpr directive is required");

  if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
    return TokError(".amdhsa_next_free_sgpr directive is required");

  // Translate raw register counts into granulated block encodings.
  unsigned VGPRBlocks;
  unsigned SGPRBlocks;
  if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
                         ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
                         SGPRRange, VGPRBlocks, SGPRBlocks))
    return true;

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
          VGPRBlocks))
    return OutOfRangeError(VGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
          SGPRBlocks))
    return OutOfRangeError(SGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
                  SGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
    return TokError("too many user SGPRs enabled");
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
                  UserSGPRCount);

  getTargetStreamer().EmitAmdhsaKernelDescriptor(
      getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
      ReserveFlatScr, ReserveXNACK);
  return false;
}
3277
// Parses ".hsa_code_object_version <major>, <minor>" and forwards the
// version pair to the target streamer.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}
3288
// Parses ".hsa_code_object_isa [<major>, <minor>, <stepping>, "<vendor>",
// "<arch>"]". With no arguments, the version for the targeted GPU is used.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex(); // Consume the comma.

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex(); // Consume the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex(); // Consume the vendor string.

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex(); // Consume the comma.

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex(); // Consume the arch string.

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
3340
Tom Stellardff7416b2015-06-26 21:58:31 +00003341bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
3342 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00003343 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
3344 // assembly for backwards compatibility.
3345 if (ID == "max_scratch_backing_memory_byte_size") {
3346 Parser.eatToEndOfStatement();
3347 return false;
3348 }
3349
Valery Pykhtindc110542016-03-06 20:25:36 +00003350 SmallString<40> ErrStr;
3351 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00003352 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00003353 return TokError(Err.str());
3354 }
Tom Stellardff7416b2015-06-26 21:58:31 +00003355 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00003356 return false;
3357}
3358
3359bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00003360 amd_kernel_code_t Header;
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003361 AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
Tom Stellardff7416b2015-06-26 21:58:31 +00003362
3363 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00003364 // Lex EndOfStatement. This is in a while loop, because lexing a comment
3365 // will set the current token to EndOfStatement.
3366 while(getLexer().is(AsmToken::EndOfStatement))
3367 Lex();
3368
3369 if (getLexer().isNot(AsmToken::Identifier))
3370 return TokError("expected value identifier or .end_amd_kernel_code_t");
3371
3372 StringRef ID = getLexer().getTok().getIdentifier();
3373 Lex();
3374
3375 if (ID == ".end_amd_kernel_code_t")
3376 break;
3377
3378 if (ParseAMDKernelCodeTValue(ID, Header))
3379 return true;
3380 }
3381
3382 getTargetStreamer().EmitAMDKernelCodeT(Header);
3383
3384 return false;
3385}
3386
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003387bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
3388 if (getLexer().isNot(AsmToken::Identifier))
3389 return TokError("expected symbol name");
3390
3391 StringRef KernelName = Parser.getTok().getString();
3392
3393 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
3394 ELF::STT_AMDGPU_HSA_KERNEL);
3395 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003396 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
3397 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003398 return false;
3399}
3400
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003401bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003402 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
3403 return Error(getParser().getTok().getLoc(),
3404 ".amd_amdgpu_isa directive is not available on non-amdgcn "
3405 "architectures");
3406 }
3407
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003408 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
3409
3410 std::string ISAVersionStringFromSTI;
3411 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
3412 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
3413
3414 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
3415 return Error(getParser().getTok().getLoc(),
3416 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
3417 "arguments specified through the command line");
3418 }
3419
3420 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
3421 Lex();
3422
3423 return false;
3424}
3425
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003426bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
Scott Linderf5b36e52018-12-12 19:39:27 +00003427 const char *AssemblerDirectiveBegin;
3428 const char *AssemblerDirectiveEnd;
3429 std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
3430 AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())
3431 ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
3432 HSAMD::V3::AssemblerDirectiveEnd)
3433 : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
3434 HSAMD::AssemblerDirectiveEnd);
3435
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003436 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
3437 return Error(getParser().getTok().getLoc(),
Scott Linderf5b36e52018-12-12 19:39:27 +00003438 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003439 "not available on non-amdhsa OSes")).str());
3440 }
3441
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003442 std::string HSAMetadataString;
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003443 if (ParseToEndDirective(AssemblerDirectiveBegin, AssemblerDirectiveEnd,
3444 HSAMetadataString))
3445 return true;
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003446
Scott Linderf5b36e52018-12-12 19:39:27 +00003447 if (IsaInfo::hasCodeObjectV3(&getSTI())) {
3448 if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
3449 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3450 } else {
3451 if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
3452 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3453 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003454
3455 return false;
3456}
3457
/// Common code to parse out a block of text (typically YAML) between start and
/// end directives.
///
/// \param AssemblerDirectiveBegin  name of the opening directive (not
///        referenced in the body; kept for interface symmetry with callers).
/// \param AssemblerDirectiveEnd    identifier that terminates the block.
/// \param CollectString            receives the collected text.
/// \returns true on error (EOF reached before the end directive).
bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin,
                                          const char *AssemblerDirectiveEnd,
                                          std::string &CollectString) {

  raw_string_ostream CollectStream(CollectString);

  // Whitespace is significant inside the collected block, so disable the
  // lexer's normal space skipping while gathering it.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Copy any leading whitespace verbatim.
    while (getLexer().is(AsmToken::Space)) {
      CollectStream << getLexer().getTok().getString();
      Lex();
    }

    // Stop as soon as the terminating directive starts a statement.
    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the rest of the line followed by a statement separator so the
    // collected text keeps its line structure.
    CollectStream << Parser.parseStringToEndOfStatement()
                  << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal lexing behavior.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(Twine("expected directive ") +
                    Twine(AssemblerDirectiveEnd) + Twine(" not found"));
  }

  CollectStream.flush();
  return false;
}
3500
3501/// Parse the assembler directive for new MsgPack-format PAL metadata.
3502bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() {
3503 std::string String;
3504 if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin,
3505 AMDGPU::PALMD::AssemblerDirectiveEnd, String))
3506 return true;
3507
3508 auto PALMetadata = getTargetStreamer().getPALMetadata();
3509 if (!PALMetadata->setFromString(String))
3510 return Error(getParser().getTok().getLoc(), "invalid PAL metadata");
3511 return false;
3512}
3513
3514/// Parse the assembler directive for old linear-format PAL metadata.
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003515bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003516 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
3517 return Error(getParser().getTok().getLoc(),
3518 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
3519 "not available on non-amdpal OSes")).str());
3520 }
3521
Tim Renoufd737b552019-03-20 17:42:00 +00003522 auto PALMetadata = getTargetStreamer().getPALMetadata();
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003523 PALMetadata->setLegacy();
Tim Renouf72800f02017-10-03 19:03:52 +00003524 for (;;) {
Tim Renoufd737b552019-03-20 17:42:00 +00003525 uint32_t Key, Value;
3526 if (ParseAsAbsoluteExpression(Key)) {
3527 return TokError(Twine("invalid value in ") +
3528 Twine(PALMD::AssemblerDirective));
3529 }
3530 if (getLexer().isNot(AsmToken::Comma)) {
3531 return TokError(Twine("expected an even number of values in ") +
3532 Twine(PALMD::AssemblerDirective));
3533 }
3534 Lex();
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003535 if (ParseAsAbsoluteExpression(Value)) {
3536 return TokError(Twine("invalid value in ") +
3537 Twine(PALMD::AssemblerDirective));
3538 }
Tim Renoufd737b552019-03-20 17:42:00 +00003539 PALMetadata->setRegister(Key, Value);
Tim Renouf72800f02017-10-03 19:03:52 +00003540 if (getLexer().isNot(AsmToken::Comma))
3541 break;
3542 Lex();
3543 }
Tim Renouf72800f02017-10-03 19:03:52 +00003544 return false;
3545}
3546
Tom Stellard45bb48e2015-06-13 03:28:10 +00003547bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00003548 StringRef IDVal = DirectiveID.getString();
3549
Scott Linder1e8c2c72018-06-21 19:38:56 +00003550 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
3551 if (IDVal == ".amdgcn_target")
3552 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00003553
Scott Linder1e8c2c72018-06-21 19:38:56 +00003554 if (IDVal == ".amdhsa_kernel")
3555 return ParseDirectiveAMDHSAKernel();
Scott Linderf5b36e52018-12-12 19:39:27 +00003556
3557 // TODO: Restructure/combine with PAL metadata directive.
3558 if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
3559 return ParseDirectiveHSAMetadata();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003560 } else {
3561 if (IDVal == ".hsa_code_object_version")
3562 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00003563
Scott Linder1e8c2c72018-06-21 19:38:56 +00003564 if (IDVal == ".hsa_code_object_isa")
3565 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00003566
Scott Linder1e8c2c72018-06-21 19:38:56 +00003567 if (IDVal == ".amd_kernel_code_t")
3568 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003569
Scott Linder1e8c2c72018-06-21 19:38:56 +00003570 if (IDVal == ".amdgpu_hsa_kernel")
3571 return ParseDirectiveAMDGPUHsaKernel();
3572
3573 if (IDVal == ".amd_amdgpu_isa")
3574 return ParseDirectiveISAVersion();
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003575
Scott Linderf5b36e52018-12-12 19:39:27 +00003576 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
3577 return ParseDirectiveHSAMetadata();
3578 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003579
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003580 if (IDVal == PALMD::AssemblerDirectiveBegin)
3581 return ParseDirectivePALMetadataBegin();
3582
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003583 if (IDVal == PALMD::AssemblerDirective)
3584 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00003585
Tom Stellard45bb48e2015-06-13 03:28:10 +00003586 return true;
3587}
3588
Matt Arsenault68802d32015-11-05 03:11:27 +00003589bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
3590 unsigned RegNo) const {
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003591
3592 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
3593 R.isValid(); ++R) {
3594 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003595 return isGFX9() || isGFX10();
3596 }
3597
3598 // GFX10 has 2 more SGPRs 104 and 105.
3599 for (MCRegAliasIterator R(AMDGPU::SGPR104_SGPR105, &MRI, true);
3600 R.isValid(); ++R) {
3601 if (*R == RegNo)
3602 return hasSGPR104_SGPR105();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003603 }
3604
3605 switch (RegNo) {
3606 case AMDGPU::TBA:
3607 case AMDGPU::TBA_LO:
3608 case AMDGPU::TBA_HI:
3609 case AMDGPU::TMA:
3610 case AMDGPU::TMA_LO:
3611 case AMDGPU::TMA_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003612 return !isGFX9() && !isGFX10();
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00003613 case AMDGPU::XNACK_MASK:
3614 case AMDGPU::XNACK_MASK_LO:
3615 case AMDGPU::XNACK_MASK_HI:
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003616 return !isCI() && !isSI() && !isGFX10() && hasXNACK();
3617 case AMDGPU::SGPR_NULL:
3618 return isGFX10();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003619 default:
3620 break;
3621 }
3622
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00003623 if (isInlineValue(RegNo))
3624 return !isCI() && !isSI() && !isVI();
3625
Matt Arsenault3b159672015-12-01 20:31:08 +00003626 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00003627 return true;
3628
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003629 if (isSI() || isGFX10()) {
3630 // No flat_scr on SI.
3631 // On GFX10 flat scratch is not a valid register operand and can only be
3632 // accessed with s_setreg/s_getreg.
Matt Arsenault3b159672015-12-01 20:31:08 +00003633 switch (RegNo) {
3634 case AMDGPU::FLAT_SCR:
3635 case AMDGPU::FLAT_SCR_LO:
3636 case AMDGPU::FLAT_SCR_HI:
3637 return false;
3638 default:
3639 return true;
3640 }
3641 }
3642
Matt Arsenault68802d32015-11-05 03:11:27 +00003643 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
3644 // SI/CI have.
3645 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
3646 R.isValid(); ++R) {
3647 if (*R == RegNo)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00003648 return hasSGPR102_SGPR103();
Matt Arsenault68802d32015-11-05 03:11:27 +00003649 }
3650
3651 return true;
3652}
3653
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Next, try the common register/immediate operand forms.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Fall back to a generic expression operand.
  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3692
Sam Kolton05ef1c92016-06-03 10:27:37 +00003693StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
3694 // Clear any forced encodings from the previous instruction.
3695 setForcedEncodingSize(0);
3696 setForcedDPP(false);
3697 setForcedSDWA(false);
3698
3699 if (Name.endswith("_e64")) {
3700 setForcedEncodingSize(64);
3701 return Name.substr(0, Name.size() - 4);
3702 } else if (Name.endswith("_e32")) {
3703 setForcedEncodingSize(32);
3704 return Name.substr(0, Name.size() - 4);
3705 } else if (Name.endswith("_dpp")) {
3706 setForcedDPP(true);
3707 return Name.substr(0, Name.size() - 4);
3708 } else if (Name.endswith("_sdwa")) {
3709 setForcedSDWA(true);
3710 return Name.substr(0, Name.size() - 5);
3711 }
3712 return Name;
3713}
3714
Tom Stellard45bb48e2015-06-13 03:28:10 +00003715bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
3716 StringRef Name,
3717 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003718 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00003719 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003720 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00003721
Tom Stellard45bb48e2015-06-13 03:28:10 +00003722 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00003723 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003724
3725 // Eat the comma or space if there is one.
3726 if (getLexer().is(AsmToken::Comma))
3727 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00003728
Tom Stellard45bb48e2015-06-13 03:28:10 +00003729 switch (Res) {
3730 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003731 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003732 Error(getLexer().getLoc(), "failed parsing operand.");
3733 while (!getLexer().is(AsmToken::EndOfStatement)) {
3734 Parser.Lex();
3735 }
3736 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003737 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003738 Error(getLexer().getLoc(), "not a valid operand.");
3739 while (!getLexer().is(AsmToken::EndOfStatement)) {
3740 Parser.Lex();
3741 }
3742 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003743 }
3744 }
3745
Tom Stellard45bb48e2015-06-13 03:28:10 +00003746 return false;
3747}
3748
3749//===----------------------------------------------------------------------===//
3750// Utility functions
3751//===----------------------------------------------------------------------===//
3752
Alex Bradbury58eba092016-11-01 16:32:05 +00003753OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00003754AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003755 switch(getLexer().getKind()) {
3756 default: return MatchOperand_NoMatch;
3757 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003758 StringRef Name = Parser.getTok().getString();
3759 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003760 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003761 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003762
3763 Parser.Lex();
3764 if (getLexer().isNot(AsmToken::Colon))
3765 return MatchOperand_ParseFail;
3766
3767 Parser.Lex();
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003768
3769 bool IsMinus = false;
3770 if (getLexer().getKind() == AsmToken::Minus) {
3771 Parser.Lex();
3772 IsMinus = true;
3773 }
3774
Tom Stellard45bb48e2015-06-13 03:28:10 +00003775 if (getLexer().isNot(AsmToken::Integer))
3776 return MatchOperand_ParseFail;
3777
3778 if (getParser().parseAbsoluteExpression(Int))
3779 return MatchOperand_ParseFail;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003780
3781 if (IsMinus)
3782 Int = -Int;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003783 break;
3784 }
3785 }
3786 return MatchOperand_Success;
3787}
3788
Alex Bradbury58eba092016-11-01 16:32:05 +00003789OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003790AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003791 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003792 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003793 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003794 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003795
Alex Bradbury58eba092016-11-01 16:32:05 +00003796 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003797 if (Res != MatchOperand_Success)
3798 return Res;
3799
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003800 if (ConvertResult && !ConvertResult(Value)) {
3801 return MatchOperand_ParseFail;
3802 }
3803
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003804 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003805 return MatchOperand_Success;
3806}
3807
// Parses "<Prefix>:[b,b,...]" where each b is 0 or 1, packing up to four
// bits (element i -> bit i) into a single immediate operand.
// NOTE(review): ConvertResult is accepted for interface symmetry with
// parseIntWithPrefix but is not applied here.
OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
  const char *Prefix,
  OperandVector &Operands,
  AMDGPUOperand::ImmTy ImmTy,
  bool (*ConvertResult)(int64_t&)) {
  StringRef Name = Parser.getTok().getString();
  if (!Name.equals(Prefix))
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  unsigned Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; I < 4; ++I) {
    if (I != 0) {
      // A closing bracket ends the list early; otherwise elements must be
      // comma-separated.
      if (getLexer().is(AsmToken::RBrac))
        break;

      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    int64_t Op;
    if (getParser().parseAbsoluteExpression(Op))
      return MatchOperand_ParseFail;

    // Only 0 and 1 are valid element values.
    if (Op != 0 && Op != 1)
      return MatchOperand_ParseFail;
    Val |= (Op << I);
  }

  // Consume the closing ']'.
  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}
3857
// Parses a boolean flag operand: the bare name sets the bit to 1 and
// "no<Name>" sets it to 0. With no token left on the statement, a default
// of 0 is used.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        // Diagnose subtarget restrictions; parsing deliberately continues so
        // the operand list stays well-formed (Error records the failure).
        if (Tok == "r128" && isGFX9())
          Error(S, "r128 modifier is not supported on this GPU");
        if (Tok == "a16" && !isGFX9())
          Error(S, "a16 modifier is not supported on this GPU");
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
3893
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003894static void addOptionalImmOperand(
3895 MCInst& Inst, const OperandVector& Operands,
3896 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3897 AMDGPUOperand::ImmTy ImmT,
3898 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003899 auto i = OptionalIdx.find(ImmT);
3900 if (i != OptionalIdx.end()) {
3901 unsigned Idx = i->second;
3902 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3903 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00003904 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003905 }
3906}
3907
Alex Bradbury58eba092016-11-01 16:32:05 +00003908OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003909AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00003910 if (getLexer().isNot(AsmToken::Identifier)) {
3911 return MatchOperand_NoMatch;
3912 }
3913 StringRef Tok = Parser.getTok().getString();
3914 if (Tok != Prefix) {
3915 return MatchOperand_NoMatch;
3916 }
3917
3918 Parser.Lex();
3919 if (getLexer().isNot(AsmToken::Colon)) {
3920 return MatchOperand_ParseFail;
3921 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003922
Sam Kolton3025e7f2016-04-26 13:33:56 +00003923 Parser.Lex();
3924 if (getLexer().isNot(AsmToken::Identifier)) {
3925 return MatchOperand_ParseFail;
3926 }
3927
3928 Value = Parser.getTok().getString();
3929 return MatchOperand_Success;
3930}
3931
// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Dfmt = 0, Nfmt = 0;
  // dfmt and nfmt can appear in either order, and each is optional.
  bool GotDfmt = false, GotNfmt = false;
  while (!GotDfmt || !GotNfmt) {
    if (!GotDfmt) {
      auto Res = parseIntWithPrefix("dfmt", Dfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // dfmt is a 4-bit field.
        if (Dfmt >= 16) {
          Error(Parser.getTok().getLoc(), "out of range dfmt");
          return MatchOperand_ParseFail;
        }
        GotDfmt = true;
        Parser.Lex();
        continue;
      }
    }
    if (!GotNfmt) {
      auto Res = parseIntWithPrefix("nfmt", Nfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        // nfmt is a 3-bit field.
        if (Nfmt >= 8) {
          Error(Parser.getTok().getLoc(), "out of range nfmt");
          return MatchOperand_ParseFail;
        }
        GotNfmt = true;
        Parser.Lex();
        continue;
      }
    }
    // Neither keyword matched at this position; stop looking.
    break;
  }
  if (!GotDfmt && !GotNfmt)
    return MatchOperand_NoMatch;
  // Pack both fields into the joint format immediate: dfmt in bits [3:0],
  // nfmt starting at bit 4.
  auto Format = Dfmt | Nfmt << 4;
  Operands.push_back(
    AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
  return MatchOperand_Success;
}
3978
Tom Stellard45bb48e2015-06-13 03:28:10 +00003979//===----------------------------------------------------------------------===//
3980// ds
3981//===----------------------------------------------------------------------===//
3982
Tom Stellard45bb48e2015-06-13 03:28:10 +00003983void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
3984 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003985 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003986
3987 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3988 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3989
3990 // Add the register arguments
3991 if (Op.isReg()) {
3992 Op.addRegOperands(Inst, 1);
3993 continue;
3994 }
3995
3996 // Handle optional arguments
3997 OptionalIdx[Op.getImmTy()] = i;
3998 }
3999
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004000 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
4001 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004002 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004003
Tom Stellard45bb48e2015-06-13 03:28:10 +00004004 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
4005}
4006
// Converts parsed DS instruction operands into MCInst form. \p IsGdsHardcoded
// is true when the opcode implies GDS, so no separate gds operand is emitted.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // A literal "gds" token means GDS is part of the mnemonic itself.
    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 carries a swizzle pattern in place of a plain offset.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
4041
// Converts parsed EXP instruction operands into MCInst form, computing the
// 'en' (enable) mask from which of the four sources are live ('off' operands
// become null-register placeholders).
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned OperandIdx[4]; // MCInst operand index of each of the four sources.
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // 'off' marks a disabled source; emit a null register in its slot.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // 'done' is not represented as a separate MCInst operand.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // Compressed form: move src2 into src1's slot and null out the now
    // unused trailing source slots.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Each live source enables one mask bit (two adjacent bits if compressed).
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00004102
4103//===----------------------------------------------------------------------===//
4104// s_waitcnt
4105//===----------------------------------------------------------------------===//
4106
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004107static bool
4108encodeCnt(
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004109 const AMDGPU::IsaVersion ISA,
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004110 int64_t &IntVal,
4111 int64_t CntVal,
4112 bool Saturate,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004113 unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
4114 unsigned (*decode)(const IsaVersion &Version, unsigned))
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004115{
4116 bool Failed = false;
4117
4118 IntVal = encode(ISA, IntVal, CntVal);
4119 if (CntVal != decode(ISA, IntVal)) {
4120 if (Saturate) {
4121 IntVal = encode(ISA, IntVal, -1);
4122 } else {
4123 Failed = true;
4124 }
4125 }
4126 return Failed;
4127}
4128
// Parses one "<name>(<value>)" component of an s_waitcnt expression and folds
// it into \p IntVal. Returns true on error (bad syntax, unknown counter name,
// or out-of-range value without a _sat suffix).
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  bool Failed = true;
  // A "_sat" suffix clamps an out-of-range count instead of rejecting it.
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  // Components may be joined with '&' or ','; consume the separator when
  // another counter name follows.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
4177
Alex Bradbury58eba092016-11-01 16:32:05 +00004178OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00004179AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004180 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00004181 int64_t Waitcnt = getWaitcntBitMask(ISA);
Tom Stellard45bb48e2015-06-13 03:28:10 +00004182 SMLoc S = Parser.getTok().getLoc();
4183
4184 switch(getLexer().getKind()) {
4185 default: return MatchOperand_ParseFail;
4186 case AsmToken::Integer:
4187 // The operand can be an integer value.
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004188 if (getParser().parseAbsoluteExpression(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00004189 return MatchOperand_ParseFail;
4190 break;
4191
4192 case AsmToken::Identifier:
4193 do {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004194 if (parseCnt(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00004195 return MatchOperand_ParseFail;
4196 } while(getLexer().isNot(AsmToken::EndOfStatement));
4197 break;
4198 }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00004199 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00004200 return MatchOperand_Success;
4201}
4202
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004203bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
4204 int64_t &Width) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004205 using namespace llvm::AMDGPU::Hwreg;
4206
Artem Tamazovd6468662016-04-25 14:13:51 +00004207 if (Parser.getTok().getString() != "hwreg")
4208 return true;
4209 Parser.Lex();
4210
4211 if (getLexer().isNot(AsmToken::LParen))
4212 return true;
4213 Parser.Lex();
4214
Artem Tamazov5cd55b12016-04-27 15:17:03 +00004215 if (getLexer().is(AsmToken::Identifier)) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004216 HwReg.IsSymbolic = true;
4217 HwReg.Id = ID_UNKNOWN_;
4218 const StringRef tok = Parser.getTok().getString();
Stanislav Mekhanoshin62875fc2018-01-15 18:49:15 +00004219 int Last = ID_SYMBOLIC_LAST_;
4220 if (isSI() || isCI() || isVI())
4221 Last = ID_SYMBOLIC_FIRST_GFX9_;
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004222 else if (isGFX9())
4223 Last = ID_SYMBOLIC_FIRST_GFX10_;
Stanislav Mekhanoshin62875fc2018-01-15 18:49:15 +00004224 for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004225 if (tok == IdSymbolic[i]) {
4226 HwReg.Id = i;
4227 break;
4228 }
4229 }
Artem Tamazov5cd55b12016-04-27 15:17:03 +00004230 Parser.Lex();
4231 } else {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004232 HwReg.IsSymbolic = false;
Artem Tamazov5cd55b12016-04-27 15:17:03 +00004233 if (getLexer().isNot(AsmToken::Integer))
4234 return true;
Artem Tamazov6edc1352016-05-26 17:00:33 +00004235 if (getParser().parseAbsoluteExpression(HwReg.Id))
Artem Tamazov5cd55b12016-04-27 15:17:03 +00004236 return true;
4237 }
Artem Tamazovd6468662016-04-25 14:13:51 +00004238
4239 if (getLexer().is(AsmToken::RParen)) {
4240 Parser.Lex();
4241 return false;
4242 }
4243
4244 // optional params
4245 if (getLexer().isNot(AsmToken::Comma))
4246 return true;
4247 Parser.Lex();
4248
4249 if (getLexer().isNot(AsmToken::Integer))
4250 return true;
4251 if (getParser().parseAbsoluteExpression(Offset))
4252 return true;
4253
4254 if (getLexer().isNot(AsmToken::Comma))
4255 return true;
4256 Parser.Lex();
4257
4258 if (getLexer().isNot(AsmToken::Integer))
4259 return true;
4260 if (getParser().parseAbsoluteExpression(Width))
4261 return true;
4262
4263 if (getLexer().isNot(AsmToken::RParen))
4264 return true;
4265 Parser.Lex();
4266
4267 return false;
4268}
4269
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00004270OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004271 using namespace llvm::AMDGPU::Hwreg;
4272
Artem Tamazovd6468662016-04-25 14:13:51 +00004273 int64_t Imm16Val = 0;
4274 SMLoc S = Parser.getTok().getLoc();
4275
4276 switch(getLexer().getKind()) {
Sam Kolton11de3702016-05-24 12:38:33 +00004277 default: return MatchOperand_NoMatch;
Artem Tamazovd6468662016-04-25 14:13:51 +00004278 case AsmToken::Integer:
4279 // The operand can be an integer value.
4280 if (getParser().parseAbsoluteExpression(Imm16Val))
Artem Tamazov6edc1352016-05-26 17:00:33 +00004281 return MatchOperand_NoMatch;
4282 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovd6468662016-04-25 14:13:51 +00004283 Error(S, "invalid immediate: only 16-bit values are legal");
4284 // Do not return error code, but create an imm operand anyway and proceed
4285 // to the next operand, if any. That avoids unneccessary error messages.
4286 }
4287 break;
4288
4289 case AsmToken::Identifier: {
Artem Tamazov6edc1352016-05-26 17:00:33 +00004290 OperandInfoTy HwReg(ID_UNKNOWN_);
4291 int64_t Offset = OFFSET_DEFAULT_;
4292 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
4293 if (parseHwregConstruct(HwReg, Offset, Width))
Artem Tamazovd6468662016-04-25 14:13:51 +00004294 return MatchOperand_ParseFail;
Artem Tamazov6edc1352016-05-26 17:00:33 +00004295 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
4296 if (HwReg.IsSymbolic)
Artem Tamazov5cd55b12016-04-27 15:17:03 +00004297 Error(S, "invalid symbolic name of hardware register");
4298 else
4299 Error(S, "invalid code of hardware register: only 6-bit values are legal");
Reid Kleckner7f0ae152016-04-27 16:46:33 +00004300 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00004301 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
Artem Tamazovd6468662016-04-25 14:13:51 +00004302 Error(S, "invalid bit offset: only 5-bit values are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00004303 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
Artem Tamazovd6468662016-04-25 14:13:51 +00004304 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00004305 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
Artem Tamazovd6468662016-04-25 14:13:51 +00004306 }
4307 break;
4308 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004309 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
Artem Tamazovd6468662016-04-25 14:13:51 +00004310 return MatchOperand_Success;
4311}
4312
Tom Stellard45bb48e2015-06-13 03:28:10 +00004313bool AMDGPUOperand::isSWaitCnt() const {
4314 return isImm();
4315}
4316
Artem Tamazovd6468662016-04-25 14:13:51 +00004317bool AMDGPUOperand::isHwreg() const {
4318 return isImmTy(ImmTyHwreg);
4319}
4320
Artem Tamazov6edc1352016-05-26 17:00:33 +00004321bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004322 using namespace llvm::AMDGPU::SendMsg;
4323
4324 if (Parser.getTok().getString() != "sendmsg")
4325 return true;
4326 Parser.Lex();
4327
4328 if (getLexer().isNot(AsmToken::LParen))
4329 return true;
4330 Parser.Lex();
4331
4332 if (getLexer().is(AsmToken::Identifier)) {
4333 Msg.IsSymbolic = true;
4334 Msg.Id = ID_UNKNOWN_;
4335 const std::string tok = Parser.getTok().getString();
4336 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
4337 switch(i) {
4338 default: continue; // Omit gaps.
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004339 case ID_GS_ALLOC_REQ:
4340 if (isSI() || isCI() || isVI())
4341 continue;
4342 break;
4343 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE:
4344 case ID_SYSMSG: break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004345 }
4346 if (tok == IdSymbolic[i]) {
4347 Msg.Id = i;
4348 break;
4349 }
4350 }
4351 Parser.Lex();
4352 } else {
4353 Msg.IsSymbolic = false;
4354 if (getLexer().isNot(AsmToken::Integer))
4355 return true;
4356 if (getParser().parseAbsoluteExpression(Msg.Id))
4357 return true;
4358 if (getLexer().is(AsmToken::Integer))
4359 if (getParser().parseAbsoluteExpression(Msg.Id))
4360 Msg.Id = ID_UNKNOWN_;
4361 }
4362 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
4363 return false;
4364
4365 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
4366 if (getLexer().isNot(AsmToken::RParen))
4367 return true;
4368 Parser.Lex();
4369 return false;
4370 }
4371
4372 if (getLexer().isNot(AsmToken::Comma))
4373 return true;
4374 Parser.Lex();
4375
4376 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
4377 Operation.Id = ID_UNKNOWN_;
4378 if (getLexer().is(AsmToken::Identifier)) {
4379 Operation.IsSymbolic = true;
4380 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
4381 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
4382 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00004383 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004384 for (int i = F; i < L; ++i) {
4385 if (Tok == S[i]) {
4386 Operation.Id = i;
4387 break;
4388 }
4389 }
4390 Parser.Lex();
4391 } else {
4392 Operation.IsSymbolic = false;
4393 if (getLexer().isNot(AsmToken::Integer))
4394 return true;
4395 if (getParser().parseAbsoluteExpression(Operation.Id))
4396 return true;
4397 }
4398
4399 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
4400 // Stream id is optional.
4401 if (getLexer().is(AsmToken::RParen)) {
4402 Parser.Lex();
4403 return false;
4404 }
4405
4406 if (getLexer().isNot(AsmToken::Comma))
4407 return true;
4408 Parser.Lex();
4409
4410 if (getLexer().isNot(AsmToken::Integer))
4411 return true;
4412 if (getParser().parseAbsoluteExpression(StreamId))
4413 return true;
4414 }
4415
4416 if (getLexer().isNot(AsmToken::RParen))
4417 return true;
4418 Parser.Lex();
4419 return false;
4420}
4421
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004422OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
4423 if (getLexer().getKind() != AsmToken::Identifier)
4424 return MatchOperand_NoMatch;
4425
4426 StringRef Str = Parser.getTok().getString();
4427 int Slot = StringSwitch<int>(Str)
4428 .Case("p10", 0)
4429 .Case("p20", 1)
4430 .Case("p0", 2)
4431 .Default(-1);
4432
4433 SMLoc S = Parser.getTok().getLoc();
4434 if (Slot == -1)
4435 return MatchOperand_ParseFail;
4436
4437 Parser.Lex();
4438 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
4439 AMDGPUOperand::ImmTyInterpSlot));
4440 return MatchOperand_Success;
4441}
4442
// Parse an interpolation attribute operand of the form "attr<N>.<chan>"
// (e.g. "attr3.x"). Pushes two immediates: the attribute index (0..63)
// and the channel (x/y/z/w -> 0..3).
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the 2-character ".x"/".y"/".z"/".w" suffix.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the "attr" prefix and channel suffix, leaving just the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  // Out-of-range attr is diagnosed but still treated as a match so that
  // parsing continues past the bad operand.
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
4482
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004483void AMDGPUAsmParser::errorExpTgt() {
4484 Error(Parser.getTok().getLoc(), "invalid exp target");
4485}
4486
4487OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
4488 uint8_t &Val) {
4489 if (Str == "null") {
4490 Val = 9;
4491 return MatchOperand_Success;
4492 }
4493
4494 if (Str.startswith("mrt")) {
4495 Str = Str.drop_front(3);
4496 if (Str == "z") { // == mrtz
4497 Val = 8;
4498 return MatchOperand_Success;
4499 }
4500
4501 if (Str.getAsInteger(10, Val))
4502 return MatchOperand_ParseFail;
4503
4504 if (Val > 7)
4505 errorExpTgt();
4506
4507 return MatchOperand_Success;
4508 }
4509
4510 if (Str.startswith("pos")) {
4511 Str = Str.drop_front(3);
4512 if (Str.getAsInteger(10, Val))
4513 return MatchOperand_ParseFail;
4514
4515 if (Val > 3)
4516 errorExpTgt();
4517
4518 Val += 12;
4519 return MatchOperand_Success;
4520 }
4521
4522 if (Str.startswith("param")) {
4523 Str = Str.drop_front(5);
4524 if (Str.getAsInteger(10, Val))
4525 return MatchOperand_ParseFail;
4526
4527 if (Val >= 32)
4528 errorExpTgt();
4529
4530 Val += 32;
4531 return MatchOperand_Success;
4532 }
4533
4534 if (Str.startswith("invalid_target_")) {
4535 Str = Str.drop_front(15);
4536 if (Str.getAsInteger(10, Val))
4537 return MatchOperand_ParseFail;
4538
4539 errorExpTgt();
4540 return MatchOperand_Success;
4541 }
4542
4543 return MatchOperand_NoMatch;
4544}
4545
4546OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
4547 uint8_t Val;
4548 StringRef Str = Parser.getTok().getString();
4549
4550 auto Res = parseExpTgtImpl(Str, Val);
4551 if (Res != MatchOperand_Success)
4552 return Res;
4553
4554 SMLoc S = Parser.getTok().getLoc();
4555 Parser.Lex();
4556
4557 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
4558 AMDGPUOperand::ImmTyExpTgt));
4559 return MatchOperand_Success;
4560}
4561
Alex Bradbury58eba092016-11-01 16:32:05 +00004562OperandMatchResultTy
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004563AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
4564 using namespace llvm::AMDGPU::SendMsg;
4565
4566 int64_t Imm16Val = 0;
4567 SMLoc S = Parser.getTok().getLoc();
4568
4569 switch(getLexer().getKind()) {
4570 default:
4571 return MatchOperand_NoMatch;
4572 case AsmToken::Integer:
4573 // The operand can be an integer value.
4574 if (getParser().parseAbsoluteExpression(Imm16Val))
4575 return MatchOperand_NoMatch;
Artem Tamazov6edc1352016-05-26 17:00:33 +00004576 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004577 Error(S, "invalid immediate: only 16-bit values are legal");
4578 // Do not return error code, but create an imm operand anyway and proceed
4579 // to the next operand, if any. That avoids unneccessary error messages.
4580 }
4581 break;
4582 case AsmToken::Identifier: {
4583 OperandInfoTy Msg(ID_UNKNOWN_);
4584 OperandInfoTy Operation(OP_UNKNOWN_);
Artem Tamazov6edc1352016-05-26 17:00:33 +00004585 int64_t StreamId = STREAM_ID_DEFAULT_;
4586 if (parseSendMsgConstruct(Msg, Operation, StreamId))
4587 return MatchOperand_ParseFail;
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004588 do {
4589 // Validate and encode message ID.
4590 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
Stanislav Mekhanoshin33d806a2019-04-24 17:28:30 +00004591 || (Msg.Id == ID_GS_ALLOC_REQ && !isSI() && !isCI() && !isVI())
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004592 || Msg.Id == ID_SYSMSG)) {
4593 if (Msg.IsSymbolic)
4594 Error(S, "invalid/unsupported symbolic name of message");
4595 else
4596 Error(S, "invalid/unsupported code of message");
4597 break;
4598 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00004599 Imm16Val = (Msg.Id << ID_SHIFT_);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004600 // Validate and encode operation ID.
4601 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
4602 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
4603 if (Operation.IsSymbolic)
4604 Error(S, "invalid symbolic name of GS_OP");
4605 else
4606 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
4607 break;
4608 }
4609 if (Operation.Id == OP_GS_NOP
4610 && Msg.Id != ID_GS_DONE) {
4611 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
4612 break;
4613 }
4614 Imm16Val |= (Operation.Id << OP_SHIFT_);
4615 }
4616 if (Msg.Id == ID_SYSMSG) {
4617 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
4618 if (Operation.IsSymbolic)
4619 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
4620 else
4621 Error(S, "invalid/unsupported code of SYSMSG_OP");
4622 break;
4623 }
4624 Imm16Val |= (Operation.Id << OP_SHIFT_);
4625 }
4626 // Validate and encode stream ID.
4627 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
4628 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
4629 Error(S, "invalid stream id: only 2-bit values are legal");
4630 break;
4631 }
4632 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
4633 }
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00004634 } while (false);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004635 }
4636 break;
4637 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004638 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004639 return MatchOperand_Success;
4640}
4641
4642bool AMDGPUOperand::isSendMsg() const {
4643 return isImmTy(ImmTySendMsg);
4644}
4645
Tom Stellard45bb48e2015-06-13 03:28:10 +00004646//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004647// parser helpers
4648//===----------------------------------------------------------------------===//
4649
4650bool
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004651AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const {
4652 return Token.is(AsmToken::Identifier) && Token.getString() == Id;
4653}
4654
4655bool
4656AMDGPUAsmParser::isId(const StringRef Id) const {
4657 return isId(getToken(), Id);
4658}
4659
4660bool
4661AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const {
4662 return getTokenKind() == Kind;
4663}
4664
4665bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004666AMDGPUAsmParser::trySkipId(const StringRef Id) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004667 if (isId(Id)) {
4668 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004669 return true;
4670 }
4671 return false;
4672}
4673
4674bool
4675AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004676 if (isToken(Kind)) {
4677 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004678 return true;
4679 }
4680 return false;
4681}
4682
4683bool
4684AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
4685 const StringRef ErrMsg) {
4686 if (!trySkipToken(Kind)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004687 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004688 return false;
4689 }
4690 return true;
4691}
4692
4693bool
4694AMDGPUAsmParser::parseExpr(int64_t &Imm) {
4695 return !getParser().parseAbsoluteExpression(Imm);
4696}
4697
4698bool
4699AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004700 if (isToken(AsmToken::String)) {
4701 Val = getToken().getStringContents();
4702 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004703 return true;
4704 } else {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004705 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004706 return false;
4707 }
4708}
4709
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004710AsmToken
4711AMDGPUAsmParser::getToken() const {
4712 return Parser.getTok();
4713}
4714
// Next token after the current one, without consuming anything.
AsmToken
AMDGPUAsmParser::peekToken() {
  return getLexer().peekTok();
}
4719
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00004720void
4721AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) {
4722 auto TokCount = getLexer().peekTokens(Tokens);
4723
4724 for (auto Idx = TokCount; Idx < Tokens.size(); ++Idx)
4725 Tokens[Idx] = AsmToken(AsmToken::Error, "");
4726}
4727
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004728AsmToken::TokenKind
4729AMDGPUAsmParser::getTokenKind() const {
4730 return getLexer().getKind();
4731}
4732
// Source location of the current token.
SMLoc
AMDGPUAsmParser::getLoc() const {
  return getToken().getLoc();
}
4737
Dmitry Preobrazhensky394d0a12019-04-17 16:56:34 +00004738StringRef
4739AMDGPUAsmParser::getTokenStr() const {
4740 return getToken().getString();
4741}
4742
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004743void
4744AMDGPUAsmParser::lex() {
4745 Parser.Lex();
4746}
4747
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004748//===----------------------------------------------------------------------===//
4749// swizzle
4750//===----------------------------------------------------------------------===//
4751
4752LLVM_READNONE
4753static unsigned
4754encodeBitmaskPerm(const unsigned AndMask,
4755 const unsigned OrMask,
4756 const unsigned XorMask) {
4757 using namespace llvm::AMDGPU::Swizzle;
4758
4759 return BITMASK_PERM_ENC |
4760 (AndMask << BITMASK_AND_SHIFT) |
4761 (OrMask << BITMASK_OR_SHIFT) |
4762 (XorMask << BITMASK_XOR_SHIFT);
4763}
4764
4765bool
4766AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
4767 const unsigned MinVal,
4768 const unsigned MaxVal,
4769 const StringRef ErrMsg) {
4770 for (unsigned i = 0; i < OpNum; ++i) {
4771 if (!skipToken(AsmToken::Comma, "expected a comma")){
4772 return false;
4773 }
4774 SMLoc ExprLoc = Parser.getTok().getLoc();
4775 if (!parseExpr(Op[i])) {
4776 return false;
4777 }
4778 if (Op[i] < MinVal || Op[i] > MaxVal) {
4779 Error(ExprLoc, ErrMsg);
4780 return false;
4781 }
4782 }
4783
4784 return true;
4785}
4786
4787bool
4788AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
4789 using namespace llvm::AMDGPU::Swizzle;
4790
4791 int64_t Lane[LANE_NUM];
4792 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
4793 "expected a 2-bit lane id")) {
4794 Imm = QUAD_PERM_ENC;
Stanislav Mekhanoshin266f1572019-03-11 16:49:32 +00004795 for (unsigned I = 0; I < LANE_NUM; ++I) {
4796 Imm |= Lane[I] << (LANE_SHIFT * I);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004797 }
4798 return true;
4799 }
4800 return false;
4801}
4802
4803bool
4804AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
4805 using namespace llvm::AMDGPU::Swizzle;
4806
4807 SMLoc S = Parser.getTok().getLoc();
4808 int64_t GroupSize;
4809 int64_t LaneIdx;
4810
4811 if (!parseSwizzleOperands(1, &GroupSize,
4812 2, 32,
4813 "group size must be in the interval [2,32]")) {
4814 return false;
4815 }
4816 if (!isPowerOf2_64(GroupSize)) {
4817 Error(S, "group size must be a power of two");
4818 return false;
4819 }
4820 if (parseSwizzleOperands(1, &LaneIdx,
4821 0, GroupSize - 1,
4822 "lane id must be in the interval [0,group size - 1]")) {
4823 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
4824 return true;
4825 }
4826 return false;
4827}
4828
4829bool
4830AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
4831 using namespace llvm::AMDGPU::Swizzle;
4832
4833 SMLoc S = Parser.getTok().getLoc();
4834 int64_t GroupSize;
4835
4836 if (!parseSwizzleOperands(1, &GroupSize,
4837 2, 32, "group size must be in the interval [2,32]")) {
4838 return false;
4839 }
4840 if (!isPowerOf2_64(GroupSize)) {
4841 Error(S, "group size must be a power of two");
4842 return false;
4843 }
4844
4845 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
4846 return true;
4847}
4848
4849bool
4850AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
4851 using namespace llvm::AMDGPU::Swizzle;
4852
4853 SMLoc S = Parser.getTok().getLoc();
4854 int64_t GroupSize;
4855
4856 if (!parseSwizzleOperands(1, &GroupSize,
4857 1, 16, "group size must be in the interval [1,16]")) {
4858 return false;
4859 }
4860 if (!isPowerOf2_64(GroupSize)) {
4861 Error(S, "group size must be a power of two");
4862 return false;
4863 }
4864
4865 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
4866 return true;
4867}
4868
// Parse swizzle(BITMASK_PERM, "<mask>") where <mask> is a 5-character
// string over {0,1,p,i}: '0' forces the lane-id bit to 0, '1' forces it
// to 1, 'p' preserves it and 'i' inverts it. The leftmost character
// controls the most significant lane-id bit.
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    // Character i controls bit (BITMASK_WIDTH - 1 - i): MSB first.
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
4915
4916bool
4917AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
4918
4919 SMLoc OffsetLoc = Parser.getTok().getLoc();
4920
4921 if (!parseExpr(Imm)) {
4922 return false;
4923 }
4924 if (!isUInt<16>(Imm)) {
4925 Error(OffsetLoc, "expected a 16-bit offset");
4926 return false;
4927 }
4928 return true;
4929}
4930
// Parse "(<mode>, ...)" following the "swizzle" keyword and dispatch to
// the mode-specific parser; diagnose unknown modes. The mode parser fills
// Imm with the final ds_swizzle_b32 offset encoding.
bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parentheses")) {

    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
  }

  return false;
}
4959
// Parse the optional "offset:" operand of ds_swizzle_b32: either the
// "offset:swizzle(...)" macro form or a raw "offset:<imm>" value.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    // Push the operand even on failure so operand counting stays sane.
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
4985
4986bool
4987AMDGPUOperand::isSwizzle() const {
4988 return isImmTy(ImmTySwizzle);
4989}
4990
4991//===----------------------------------------------------------------------===//
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00004992// VGPR Index Mode
4993//===----------------------------------------------------------------------===//
4994
// Parse the body of "gpr_idx(...)": a possibly-empty comma-separated list
// of VGPR index mode names; the opening parenthesis has already been
// consumed. Returns the OR of the selected mode bits (OFF for an empty
// list). Errors are diagnosed but the bits collected so far are returned.
int64_t AMDGPUAsmParser::parseGPRIdxMacro() {

  using namespace llvm::AMDGPU::VGPRIndexMode;

  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = Parser.getTok().getLoc();

    // Try each known mode name; Mode stays 0 when nothing matched.
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      // Only the first position may be a closing parenthesis (empty list
      // is handled above), hence the two different messages.
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      break;
    }

    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      break;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      break;
  }

  return Imm;
}
5038
// Parse an s_set_gpr_idx_on mode operand: either the "gpr_idx(...)" macro
// or a raw 4-bit immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {

  int64_t Imm = 0;
  SMLoc S = Parser.getTok().getLoc();

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "gpr_idx" &&
      getLexer().peekTok().is(AsmToken::LParen)) {

    // Consume "gpr_idx" and "(".
    Parser.Lex();
    Parser.Lex();

    // If parse failed, trigger an error but do not return error code
    // to avoid excessive error messages.
    Imm = parseGPRIdxMacro();

  } else {
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_NoMatch;
    if (Imm < 0 || !isUInt<4>(Imm)) {
      Error(S, "invalid immediate: only 4-bit values are legal");
    }
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode));
  return MatchOperand_Success;
}
5068
5069bool AMDGPUOperand::isGPRIdxMode() const {
5070 return isImmTy(ImmTyGprIdxMode);
5071}
5072
5073//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005074// sopp branch targets
5075//===----------------------------------------------------------------------===//
5076
Alex Bradbury58eba092016-11-01 16:32:05 +00005077OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00005078AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
5079 SMLoc S = Parser.getTok().getLoc();
5080
5081 switch (getLexer().getKind()) {
5082 default: return MatchOperand_ParseFail;
5083 case AsmToken::Integer: {
5084 int64_t Imm;
5085 if (getParser().parseAbsoluteExpression(Imm))
5086 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005087 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00005088 return MatchOperand_Success;
5089 }
5090
5091 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005092 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00005093 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
5094 Parser.getTok().getString()), getContext()), S));
5095 Parser.Lex();
5096 return MatchOperand_Success;
5097 }
5098}
5099
5100//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005101// mubuf
5102//===----------------------------------------------------------------------===//
5103
Sam Kolton5f10a132016-05-06 11:31:17 +00005104AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005105 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00005106}
5107
// Default (absent) 'slc' modifier: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
5111
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005112void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
5113 const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005114 bool IsAtomic,
5115 bool IsAtomicReturn,
5116 bool IsLds) {
5117 bool IsLdsOpcode = IsLds;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005118 bool HasLdsModifier = false;
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005119 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005120 assert(IsAtomicReturn ? IsAtomic : true);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005121 unsigned FirstOperandIdx = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00005122
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005123 for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00005124 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5125
5126 // Add the register arguments
5127 if (Op.isReg()) {
5128 Op.addRegOperands(Inst, 1);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005129 // Insert a tied src for atomic return dst.
5130 // This cannot be postponed as subsequent calls to
5131 // addImmOperands rely on correct number of MC operands.
5132 if (IsAtomicReturn && i == FirstOperandIdx)
5133 Op.addRegOperands(Inst, 1);
Tom Stellard45bb48e2015-06-13 03:28:10 +00005134 continue;
5135 }
5136
5137 // Handle the case where soffset is an immediate
5138 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5139 Op.addImmOperands(Inst, 1);
5140 continue;
5141 }
5142
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005143 HasLdsModifier = Op.isLDS();
5144
Tom Stellard45bb48e2015-06-13 03:28:10 +00005145 // Handle tokens like 'offen' which are sometimes hard-coded into the
5146 // asm string. There are no MCInst operands for these.
5147 if (Op.isToken()) {
5148 continue;
5149 }
5150 assert(Op.isImm());
5151
5152 // Handle optional arguments
5153 OptionalIdx[Op.getImmTy()] = i;
5154 }
5155
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005156 // This is a workaround for an llvm quirk which may result in an
5157 // incorrect instruction selection. Lds and non-lds versions of
5158 // MUBUF instructions are identical except that lds versions
5159 // have mandatory 'lds' modifier. However this modifier follows
5160 // optional modifiers and llvm asm matcher regards this 'lds'
5161 // modifier as an optional one. As a result, an lds version
5162 // of opcode may be selected even if it has no 'lds' modifier.
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005163 if (IsLdsOpcode && !HasLdsModifier) {
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005164 int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
5165 if (NoLdsOpcode != -1) { // Got lds version - correct it.
5166 Inst.setOpcode(NoLdsOpcode);
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005167 IsLdsOpcode = false;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005168 }
5169 }
5170
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005171 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005172 if (!IsAtomic) { // glc is hard-coded.
5173 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5174 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005175 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005176
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005177 if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005178 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5179 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005180}
5181
David Stuttard70e8bc12017-06-22 16:29:22 +00005182void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
5183 OptionalImmIndexMap OptionalIdx;
5184
5185 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
5186 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5187
5188 // Add the register arguments
5189 if (Op.isReg()) {
5190 Op.addRegOperands(Inst, 1);
5191 continue;
5192 }
5193
5194 // Handle the case where soffset is an immediate
5195 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5196 Op.addImmOperands(Inst, 1);
5197 continue;
5198 }
5199
5200 // Handle tokens like 'offen' which are sometimes hard-coded into the
5201 // asm string. There are no MCInst operands for these.
5202 if (Op.isToken()) {
5203 continue;
5204 }
5205 assert(Op.isImm());
5206
5207 // Handle optional arguments
5208 OptionalIdx[Op.getImmTy()] = i;
5209 }
5210
5211 addOptionalImmOperand(Inst, Operands, OptionalIdx,
5212 AMDGPUOperand::ImmTyOffset);
Tim Renouf35484c92018-08-21 11:06:05 +00005213 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
David Stuttard70e8bc12017-06-22 16:29:22 +00005214 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5215 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
5216 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5217}
5218
Tom Stellard45bb48e2015-06-13 03:28:10 +00005219//===----------------------------------------------------------------------===//
5220// mimg
5221//===----------------------------------------------------------------------===//
5222
/// Convert parsed MIMG operands into an MCInst.
/// \p IsAtomic adds a tied source that mirrors the single def (atomics
/// return the previous value through vdata).
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1; // Operand 0 is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Defs (vdata/vdst) come first in the parsed operand list.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      // Record position of each optional immediate modifier.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  // Append optional modifiers in the fixed order the MCInst encoding expects.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
5262
// MIMG atomic conversion: same as cvtMIMG but with the tied dst/src
// handling enabled.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  cvtMIMG(Inst, Operands, true);
}
5266
Tom Stellard45bb48e2015-06-13 03:28:10 +00005267//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00005268// smrd
5269//===----------------------------------------------------------------------===//
5270
Artem Tamazov54bfd542016-10-31 16:07:39 +00005271bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00005272 return isImm() && isUInt<8>(getImm());
5273}
5274
Artem Tamazov54bfd542016-10-31 16:07:39 +00005275bool AMDGPUOperand::isSMRDOffset20() const {
5276 return isImm() && isUInt<20>(getImm());
5277}
5278
Tom Stellard217361c2015-08-06 19:28:38 +00005279bool AMDGPUOperand::isSMRDLiteralOffset() const {
5280 // 32-bit literals are only supported on CI and we only want to use them
5281 // when the offset is > 8-bits.
5282 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
5283}
5284
// Default (absent) 8-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5288
// Default (absent) 20-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5292
// Default (absent) SMRD literal offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5296
// Default (absent) unsigned 12-bit offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5300
// Default (absent) signed 13-bit offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
5304
Tom Stellard217361c2015-08-06 19:28:38 +00005305//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005306// vop3
5307//===----------------------------------------------------------------------===//
5308
// Convert an 'omod' multiplier written in asm (mul:1/2/4) to its encoded
// value (0/1/2). Returns false for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
5316
// Convert an 'omod' divisor written in asm (div:1/2) to its encoded value
// (0/3). Returns false for any other divisor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
5330
// Convert the DPP 'bound_ctrl' asm value to its encoding:
// bound_ctrl:0 encodes as 1, bound_ctrl:-1 encodes as 0.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
5344
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005345// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00005346static const OptionalOperand AMDGPUOptionalOperandTable[] = {
5347 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
5348 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
5349 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
5350 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
5351 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
5352 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005353 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005354 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00005355 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
Tim Renouf35484c92018-08-21 11:06:05 +00005356 {"dfmt", AMDGPUOperand::ImmTyFORMAT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005357 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
5358 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
5359 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00005360 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005361 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005362 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
5363 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
5364 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
5365 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
Ryan Taylor1f334d02018-08-28 15:07:30 +00005366 {"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
5367 {"a16", AMDGPUOperand::ImmTyR128A16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005368 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00005369 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005370 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
5371 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
5372 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
5373 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00005374 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
5375 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
5376 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005377 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00005378 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005379 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005380 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
5381 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
5382 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
5383 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005384};
Tom Stellard45bb48e2015-06-13 03:28:10 +00005385
/// Parse one optional operand, then (when appropriate) keep parsing to skip
/// past hardcoded mandatory operands that textually follow optional ones.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  unsigned size = Operands.size();
  assert(size > 0);

  OperandMatchResultTy res = parseOptionalOpr(Operands);

  // This is a hack to enable hardcoded mandatory operands which follow
  // optional operands.
  //
  // Current design assumes that all operands after the first optional operand
  // are also optional. However implementation of some instructions violates
  // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
  //
  // To alleviate this problem, we have to (implicitly) parse extra operands
  // to make sure autogenerated parser of custom operands never hit hardcoded
  // mandatory operands.

  // Only lookahead when this is the first optional operand: either nothing
  // but the mnemonic has been parsed yet, or the previous operand was a
  // register (i.e. not already an optional modifier).
  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {

    // We have parsed the first optional operand.
    // Parse as many operands as necessary to skip all mandatory operands.

    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
      if (res != MatchOperand_Success ||
          getLexer().is(AsmToken::EndOfStatement)) break;
      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
      res = parseOptionalOpr(Operands);
    }
  }

  return res;
}
5418
5419OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00005420 OperandMatchResultTy res;
5421 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
5422 // try to parse any optional operand here
5423 if (Op.IsBit) {
5424 res = parseNamedBit(Op.Name, Operands, Op.Type);
5425 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
5426 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00005427 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
5428 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
5429 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
5430 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00005431 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
5432 res = parseSDWADstUnused(Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005433 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
5434 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
5435 Op.Type == AMDGPUOperand::ImmTyNegLo ||
5436 Op.Type == AMDGPUOperand::ImmTyNegHi) {
5437 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
5438 Op.ConvertResult);
Tim Renouf35484c92018-08-21 11:06:05 +00005439 } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT) {
5440 res = parseDfmtNfmt(Operands);
Sam Kolton11de3702016-05-24 12:38:33 +00005441 } else {
5442 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
5443 }
5444 if (res != MatchOperand_NoMatch) {
5445 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00005446 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005447 }
5448 return MatchOperand_NoMatch;
5449}
5450
Matt Arsenault12c53892016-11-15 19:58:54 +00005451OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005452 StringRef Name = Parser.getTok().getString();
5453 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00005454 return parseIntWithPrefix("mul", Operands,
5455 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005456 }
Matt Arsenault12c53892016-11-15 19:58:54 +00005457
5458 if (Name == "div") {
5459 return parseIntWithPrefix("div", Operands,
5460 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
5461 }
5462
5463 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005464}
5465
/// Convert a VOP3 instruction whose destination op_sel bit has no operand of
/// its own: after the regular VOP3P conversion, the op_sel bit just past the
/// last source selects the destination half and is stored in src0_modifiers
/// as DST_OP_SEL.
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  // Count how many named src operands this opcode actually has (1..3).
  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  // Bit SrcNum of op_sel is the destination select; fold it into
  // src0_modifiers.
  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}
5489
Sam Koltona3ec5c12016-10-07 14:46:06 +00005490static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
5491 // 1. This operand is input modifiers
5492 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
5493 // 2. This is not last operand
5494 && Desc.NumOperands > (OpNum + 1)
5495 // 3. Next operand is register class
5496 && Desc.OpInfo[OpNum + 1].RegClass != -1
5497 // 4. Next register is not tied to any other operand
5498 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
5499}
5500
/// Convert a parsed VOP3 interpolation instruction (v_interp_*) into an
/// MCInst. Interp slot/attr/channel operands are emitted as plain
/// immediates; optional high/clamp/omod modifiers are appended only when
/// the opcode defines them.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1; // Operand 0 is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with fp input modifiers: emits modifier + value operands.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      Inst.addOperand(MCOperand::createImm(Op.getImm()));
    } else if (Op.isImmModifier()) {
      // Optional modifier: remember where it appeared.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}
5539
/// Convert parsed VOP3 operands into an MCInst; positions of optional
/// modifiers are returned through \p OptionalIdx so callers (e.g. cvtVOP3P)
/// can append further modifiers.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1; // Operand 0 is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        // Emits both the modifier operand and the source value operand.
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  // Append clamp/omod only when the opcode defines them.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // Special case v_mac_{f16, f32} and v_fmac_f32 (gfx906):
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0.
  if (Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
      Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
      Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi ||
      Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
      Opc == AMDGPU::V_FMAC_F32_e64_vi ||
      Opc == AMDGPU::V_FMAC_F16_e64_gfx10) {
    // Splice src2_modifiers (0) and the tied src2 (= dst) into the MCInst
    // at the position named operand info expects.
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
5602
// Plain VOP3 conversion; the optional-operand index map is only needed
// internally, so it is created and discarded here.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  cvtVOP3(Inst, Operands, OptionalIdx);
}
5607
/// Convert a parsed VOP3P (packed) instruction: do the regular VOP3
/// conversion, append op_sel/op_sel_hi/neg_lo/neg_hi operands, then fold
/// their per-source bits into the src*_modifiers operands where the
/// encoding actually carries them.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  // Opcodes with a vdst_in input get the dst register repeated as that
  // tied input.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    // Packed ops default op_sel_hi to all-ones (-1) when not written.
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Fold bit J of each mask into source J's modifiers operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
5690
Sam Koltondfa29f72016-03-09 12:29:31 +00005691//===----------------------------------------------------------------------===//
5692// dpp
5693//===----------------------------------------------------------------------===//
5694
5695bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005696 using namespace AMDGPU::DPP;
5697
Sam Koltondfa29f72016-03-09 12:29:31 +00005698 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
5699 if (result) {
5700 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005701 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
5702 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
5703 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
5704 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
5705 (Imm == DppCtrl::WAVE_SHL1) ||
5706 (Imm == DppCtrl::WAVE_ROL1) ||
5707 (Imm == DppCtrl::WAVE_SHR1) ||
5708 (Imm == DppCtrl::WAVE_ROR1) ||
5709 (Imm == DppCtrl::ROW_MIRROR) ||
5710 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
5711 (Imm == DppCtrl::BCAST15) ||
5712 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00005713 }
5714 return false;
5715}
5716
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00005717bool AMDGPUOperand::isS16Imm() const {
5718 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
5719}
5720
5721bool AMDGPUOperand::isU16Imm() const {
5722 return isImm() && isUInt<16>(getImm());
5723}
5724
/// Parse a DPP control operand (dpp_ctrl) and push it as an ImmTyDppCtrl
/// immediate.
///
/// Accepted forms (encodings come from AMDGPU::DPP::DppCtrl):
///   row_mirror, row_half_mirror              - bare identifiers
///   quad_perm:[a,b,c,d]                      - each selector in [0,3]
///   row_shl:n, row_shr:n, row_ror:n          - n in [1,15]
///   wave_shl:1, wave_rol:1, wave_shr:1, wave_ror:1
///   row_bcast:15, row_bcast:31
///
/// Returns MatchOperand_NoMatch if the current token is not a DPP prefix
/// (so other parsers may try), MatchOperand_ParseFail on malformed input.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  using namespace AMDGPU::DPP;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  // A DPP control always begins with an identifier.
  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = DppCtrl::ROW_MIRROR;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = DppCtrl::ROW_HALF_MIRROR;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    // All remaining forms require "<prefix>:".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors go into bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();
    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Shift/rotate amounts are folded into the encoding by OR-ing the
      // amount (1..15) onto the corresponding *_FIRST base value; the
      // wave_* forms only accept an amount of exactly 1.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHL0;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHR0;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_ROR0;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = DppCtrl::WAVE_SHL1;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = DppCtrl::WAVE_ROL1;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = DppCtrl::WAVE_SHR1;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = DppCtrl::WAVE_ROR1;
      } else if (Prefix == "row_bcast") {
        // Only the two broadcast widths 15 and 31 exist.
        if (Int == 15) {
          Int = DppCtrl::BCAST15;
        } else if (Int == 31) {
          Int = DppCtrl::BCAST31;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
5825
Sam Kolton5f10a132016-05-06 11:31:17 +00005826AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005827 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005828}
5829
David Stuttard20ea21c2019-03-12 09:52:58 +00005830AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
5831 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
5832}
5833
Sam Kolton5f10a132016-05-06 11:31:17 +00005834AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005835 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005836}
5837
Sam Kolton5f10a132016-05-06 11:31:17 +00005838AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005839 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00005840}
5841
5842void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00005843 OptionalImmIndexMap OptionalIdx;
5844
5845 unsigned I = 1;
5846 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5847 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5848 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5849 }
5850
5851 for (unsigned E = Operands.size(); I != E; ++I) {
Valery Pykhtin3d9afa22018-11-30 14:21:56 +00005852 auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
5853 MCOI::TIED_TO);
5854 if (TiedTo != -1) {
5855 assert((unsigned)TiedTo < Inst.getNumOperands());
5856 // handle tied old or src2 for MAC instructions
5857 Inst.addOperand(Inst.getOperand(TiedTo));
5858 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005859 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5860 // Add the register arguments
Dmitry Preobrazhensky47621d72019-04-24 14:06:15 +00005861 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00005862 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00005863 // Skip it.
5864 continue;
5865 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00005866 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00005867 } else if (Op.isDPPCtrl()) {
5868 Op.addImmOperands(Inst, 1);
5869 } else if (Op.isImm()) {
5870 // Handle optional arguments
5871 OptionalIdx[Op.getImmTy()] = I;
5872 } else {
5873 llvm_unreachable("Invalid operand type");
5874 }
5875 }
5876
Sam Koltondfa29f72016-03-09 12:29:31 +00005877 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
5878 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
5879 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
5880}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00005881
Sam Kolton3025e7f2016-04-26 13:33:56 +00005882//===----------------------------------------------------------------------===//
5883// sdwa
5884//===----------------------------------------------------------------------===//
5885
Alex Bradbury58eba092016-11-01 16:32:05 +00005886OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00005887AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
5888 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005889 using namespace llvm::AMDGPU::SDWA;
5890
Sam Kolton3025e7f2016-04-26 13:33:56 +00005891 SMLoc S = Parser.getTok().getLoc();
5892 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005893 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00005894
Sam Kolton05ef1c92016-06-03 10:27:37 +00005895 res = parseStringWithPrefix(Prefix, Value);
5896 if (res != MatchOperand_Success) {
5897 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005898 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005899
Sam Kolton3025e7f2016-04-26 13:33:56 +00005900 int64_t Int;
5901 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005902 .Case("BYTE_0", SdwaSel::BYTE_0)
5903 .Case("BYTE_1", SdwaSel::BYTE_1)
5904 .Case("BYTE_2", SdwaSel::BYTE_2)
5905 .Case("BYTE_3", SdwaSel::BYTE_3)
5906 .Case("WORD_0", SdwaSel::WORD_0)
5907 .Case("WORD_1", SdwaSel::WORD_1)
5908 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005909 .Default(0xffffffff);
5910 Parser.Lex(); // eat last token
5911
5912 if (Int == 0xffffffff) {
5913 return MatchOperand_ParseFail;
5914 }
5915
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005916 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005917 return MatchOperand_Success;
5918}
5919
Alex Bradbury58eba092016-11-01 16:32:05 +00005920OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00005921AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005922 using namespace llvm::AMDGPU::SDWA;
5923
Sam Kolton3025e7f2016-04-26 13:33:56 +00005924 SMLoc S = Parser.getTok().getLoc();
5925 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005926 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005927
5928 res = parseStringWithPrefix("dst_unused", Value);
5929 if (res != MatchOperand_Success) {
5930 return res;
5931 }
5932
5933 int64_t Int;
5934 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005935 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
5936 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
5937 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005938 .Default(0xffffffff);
5939 Parser.Lex(); // eat last token
5940
5941 if (Int == 0xffffffff) {
5942 return MatchOperand_ParseFail;
5943 }
5944
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005945 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005946 return MatchOperand_Success;
5947}
5948
// Convert a parsed SDWA VOP1 instruction (delegates to cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
5952
// Convert a parsed SDWA VOP2 instruction (delegates to cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
5956
// Convert a parsed SDWA VOP2b instruction. VOP2b carries an explicit "vcc"
// token, so ask cvtSDWA to skip it (skipVcc = true).
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
5960
// Convert a parsed SDWA VOPC instruction; the "vcc" token is skipped only
// on VI subtargets.
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
5964
/// Shared conversion of parsed SDWA operands into \p Inst.
///
/// \p BasicInstType (VOP1/VOP2/VOPC) selects which optional SDWA operands
/// (clamp, omod, dst_sel, dst_unused, src0_sel, src1_sel) are appended with
/// their defaults; \p skipVcc requests dropping an explicit "vcc" register
/// operand that appears in the token stream of VOP2b/VOPC forms.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  // Emit the defs first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.getReg() == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Modifier-bearing source: emitted as a (mods, reg-or-imm) pair.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx10 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // V_NOP_sdwa (vi/gfx9/gfx10) has no optional sdwa arguments.
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // omod exists only on some VOP1 sdwa encodings; append it only then.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      // VOPC has no dst_sel/dst_unused (result goes to VCC/SGPR).
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00006053
Tom Stellard45bb48e2015-06-13 03:28:10 +00006054/// Force static initialization.
6055extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00006056 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
6057 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00006058}
6059
6060#define GET_REGISTER_MATCHER
6061#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00006062#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00006063#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00006064
// This function should be defined after the auto-generated include so that
// we have the MatchClassKind enum defined.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
                                                     unsigned Kind) {
  // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
  // But MatchInstructionImpl() expects to meet token and fails to validate
  // operand. This method checks if we are given immediate operand but expect to
  // get corresponding token.
  AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
  switch (Kind) {
  case MCK_addr64:
    return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
  case MCK_gds:
    return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
  case MCK_lds:
    return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
  case MCK_glc:
    return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
  case MCK_idxen:
    return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
  case MCK_offen:
    return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcB32:
    // When operands have expression values, they will return true for isToken,
    // because it is not possible to distinguish between a token and an
    // expression at parse time. MatchInstructionImpl() will always try to
    // match an operand as a token, when isToken returns true, and when the
    // name of the expression is not a valid token, the match will fail,
    // so we need to handle it here.
    return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcF32:
    return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
  case MCK_SoppBrTarget:
    return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
  case MCK_VReg32OrOff:
    return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
  case MCK_InterpSlot:
    return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
  case MCK_Attr:
    return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
  case MCK_AttrChan:
    return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
  default:
    return Match_InvalidOperand;
  }
}
David Stuttard20ea21c2019-03-12 09:52:58 +00006111
6112//===----------------------------------------------------------------------===//
6113// endpgm
6114//===----------------------------------------------------------------------===//
6115
6116OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
6117 SMLoc S = Parser.getTok().getLoc();
6118 int64_t Imm = 0;
6119
6120 if (!parseExpr(Imm)) {
6121 // The operand is optional, if not present default to 0
6122 Imm = 0;
6123 }
6124
6125 if (!isUInt<16>(Imm)) {
6126 Error(S, "expected a 16-bit value");
6127 return MatchOperand_ParseFail;
6128 }
6129
6130 Operands.push_back(
6131 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
6132 return MatchOperand_Success;
6133}
6134
// True for immediates tagged ImmTyEndpgm (created by parseEndpgmOp()).
bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }