//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <map>
#include <memory>
#include <string>

using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::amdhsa;

namespace {

class AMDGPUAsmParser;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  using Ptr = std::unique_ptr<AMDGPUOperand>;

  struct Modifiers {
    bool Abs = false;
    bool Neg = false;
    bool Sext = false;

    bool hasFPModifiers() const { return Abs || Neg; }
    bool hasIntModifiers() const { return Sext; }
    bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }

    int64_t getFPModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Abs ? SISrcMods::ABS : 0u;
      Operand |= Neg ? SISrcMods::NEG : 0u;
      return Operand;
    }

    int64_t getIntModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Sext ? SISrcMods::SEXT : 0u;
      return Operand;
    }

    int64_t getModifiersOperand() const {
      assert(!(hasFPModifiers() && hasIntModifiers())
           && "fp and int modifiers should not be used simultaneously");
      if (hasFPModifiers()) {
        return getFPModifiersOperand();
      } else if (hasIntModifiers()) {
        return getIntModifiersOperand();
      } else {
        return 0;
      }
    }

    friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
  };
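
  // For illustration: a source operand written as "-|v0|" sets both Neg and
  // Abs, so getModifiersOperand() yields SISrcMods::NEG | SISrcMods::ABS,
  // while "sext(v0)" sets only Sext. As the assert above notes, FP and
  // integer modifiers never appear together on a single operand.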

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyLDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyD16,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128A16,
    ImmTyLWE,
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyFORMAT,
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyGprIdxMode,
    ImmTyEndpgm,
    ImmTyHigh
  };
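
  // For illustration: most of these tags correspond to optional textual
  // specifiers in the assembly source, e.g. "offset:16" yields ImmTyOffset,
  // a bare "glc" yields ImmTyGLC, and "row_mask:0xf" yields ImmTyDppRowMask.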

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    int64_t Val;
    ImmTy Type;
    bool IsFPImm;
    Modifiers Mods;
  };

  struct RegOp {
    unsigned RegNo;
    bool IsForcedVOP3;
    Modifiers Mods;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  bool isToken() const override {
    if (Kind == Token)
      return true;

    if (Kind != Expression || !Expr)
      return false;

    // When parsing operands, we can't always tell if something was meant to be
    // a token, like 'gds', or an expression that references a global variable.
    // In this case, we assume the string is an expression, and if we need to
    // interpret it as a token, then we treat the symbol name as the token.
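    // (For illustration: a trailing "gds" specifier may initially come back
    // from the generic parser as a reference to a symbol named "gds".)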
    return isa<MCSymbolRefExpr>(Expr);
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }

  bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
    return isRegClass(RCID) || isInlinableImm(type);
  }

  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVReg() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
           isRegClass(AMDGPU::VReg_64RegClassID) ||
           isRegClass(AMDGPU::VReg_96RegClassID) ||
           isRegClass(AMDGPU::VReg_128RegClassID) ||
           isRegClass(AMDGPU::VReg_256RegClassID) ||
           isRegClass(AMDGPU::VReg_512RegClassID);
  }

  bool isVReg32() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  bool isVReg32OrOff() const {
    return isOff() || isVReg32();
  }

  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
  bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const;

  bool isInlineValue() const;

  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }

  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }

  bool isSSrcOrLdsB32() const {
    return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
           isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  bool isVSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isVSrcB16();
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  bool isVSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isVSrcF16();
  }

  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
  bool isEndpgm() const;

  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }

  StringRef getToken() const {
    assert(isToken());

    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }

  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }

  uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;

  template <unsigned Bitwidth>
  void addKImmFPOperands(MCInst &Inst, unsigned N) const;

  void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<16>(Inst, N);
  }

  void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<32>(Inst, N);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const;

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else if (isExpr())
      Inst.addOperand(MCOperand::createExpr(Expr));
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      addImmOperands(Inst, N, false);
    }
  }

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    assert(isRegKind());
    addRegOperands(Inst, N);
  }

  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyFORMAT: OS << "FORMAT"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128A16: OS << "R128A16"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyAttrChan: OS << "AttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
    case ImmTyHigh: OS << "High"; break;
    case ImmTyEndpgm: OS << "Endpgm"; break;
    }
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
                                      int64_t Val, SMLoc Loc,
                                      ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Mods = Modifiers();
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
                                        StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
                                      unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
    Op->Reg.RegNo = RegNo;
    Op->Reg.Mods = Modifiers();
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
                                       const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
  return OS;
}

//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//

// Holds info related to the current kernel, e.g. count of SGPRs used.
// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
// .amdgpu_hsa_kernel or at EOF.
class KernelScopeInfo {
  int SgprIndexUnusedMin = -1;
  int VgprIndexUnusedMin = -1;
  MCContext *Ctx = nullptr;

  void usesSgprAt(int i) {
    if (i >= SgprIndexUnusedMin) {
      SgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
      }
    }
  }

  void usesVgprAt(int i) {
    if (i >= VgprIndexUnusedMin) {
      VgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
      }
    }
  }

public:
  KernelScopeInfo() = default;

  void initialize(MCContext &Context) {
    Ctx = &Context;
    usesSgprAt(SgprIndexUnusedMin = -1);
    usesVgprAt(VgprIndexUnusedMin = -1);
  }

  void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
    switch (RegKind) {
    case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
    case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
    default: break;
    }
  }
};
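
// For illustration: after usesRegister(IS_VGPR, /*DwordRegIndex=*/12,
// /*RegWidth=*/1), the ".kernel.vgpr_count" symbol evaluates to 13, since
// registers v0..v12 are assumed to be in use.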

class AMDGPUAsmParser : public MCTargetAsmParser {
  MCAsmParser &Parser;

  // Number of extra operands parsed after the first optional operand.
  // This may be necessary to skip hardcoded mandatory operands.
  static const unsigned MAX_OPR_LOOKAHEAD = 8;

  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool OutOfRangeError(SMRange Range);
  /// Calculate VGPR/SGPR blocks required for given target, reserved
  /// registers, and user-specified NextFreeXGPR values.
  ///
  /// \param Features [in] Target features, used for bug corrections.
  /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
  /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
  /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
  /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
  /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
  /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
  /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
  /// \param VGPRBlocks [out] Result VGPR block count.
  /// \param SGPRBlocks [out] Result SGPR block count.
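  ///
  /// In outline (a sketch, not the exact computation): each count is rounded
  /// up to the target's allocation granularity and returned as a number of
  /// granule-sized blocks, after feature-specific corrections; see the
  /// AMDGPU::IsaInfo helpers in AMDGPUBaseInfo for the authoritative rules.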
  bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed,
                          unsigned NextFreeVGPR, SMRange VGPRRange,
                          unsigned NextFreeSGPR, SMRange SGPRRange,
                          unsigned &VGPRBlocks, unsigned &SGPRBlocks);
  bool ParseDirectiveAMDGCNTarget();
  bool ParseDirectiveAMDHSAKernel();
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadata();

  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
  void initializeGprCountSymbol(RegisterKind RegKind);
  bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                             unsigned RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is no suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
      AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
      MCContext &Ctx = getContext();
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      } else {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }
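
  // For illustration: the symbols defined above let the source being
  // assembled query the target, e.g. (a hypothetical sketch):
  //   .if .option.machine_version_major >= 8
  //   // ... VI-specific code ...
  //   .endif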

  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
  OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateSOPLiteral(const MCInst &Inst) const;
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst);
  bool validateMIMGD16(const MCInst &Inst);
  bool validateLdsDirect(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  bool parseExpr(int64_t &Imm);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
  int64_t parseGPRIdxMacro();

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;

  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001175
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00001176 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1177
Sam Kolton10ac2fd2017-07-07 15:21:52 +00001178 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1179 bool IsAtomic = false);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001180 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +00001181
Sam Kolton11de3702016-05-24 12:38:33 +00001182 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +00001183 AMDGPUOperand::Ptr defaultRowMask() const;
1184 AMDGPUOperand::Ptr defaultBankMask() const;
1185 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1186 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001187
Sam Kolton05ef1c92016-06-03 10:27:37 +00001188 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1189 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +00001190 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +00001191 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1192 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Koltonf7659d712017-05-23 10:08:55 +00001193 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +00001194 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1195 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Koltonf7659d712017-05-23 10:08:55 +00001196 uint64_t BasicInstType, bool skipVcc = false);
David Stuttard20ea21c2019-03-12 09:52:58 +00001197
1198 OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
1199 AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001200};
1201
1202struct OptionalOperand {
1203 const char *Name;
1204 AMDGPUOperand::ImmTy Type;
1205 bool IsBit;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001206 bool (*ConvertResult)(int64_t&);
1207};
1208
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001209} // end anonymous namespace
1210
// May be called with an integer type with an equivalent bitwidth.
static const fltSemantics *getFltSemantics(unsigned Size) {
  switch (Size) {
  case 4:
    return &APFloat::IEEEsingle();
  case 8:
    return &APFloat::IEEEdouble();
  case 2:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}

static const fltSemantics *getFltSemantics(MVT VT) {
  return getFltSemantics(VT.getSizeInBits() / 8);
}

static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    return &APFloat::IEEEsingle();
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return &APFloat::IEEEdouble();
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}

//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//

static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool Lost;

  // Convert literal to the semantics of the requested type.
  APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
                                               APFloat::rmNearestTiesToEven,
                                               &Lost);
  // We allow precision loss but not overflow or underflow.
  if (Status != APFloat::opOK &&
      Lost &&
      ((Status & APFloat::opOverflow)  != 0 ||
       (Status & APFloat::opUnderflow) != 0)) {
    return false;
  }

  return true;
}
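
// Illustrative examples (informal, not exhaustive): converting 0.5 to f16
// is exact (opOK); converting 1.0e10 to f16 overflows and is rejected;
// converting pi to f32 merely loses precision and is therefore accepted.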

bool AMDGPUOperand::isInlinableImm(MVT type) const {

  // This is a hack to enable named inline values like
  // shared_base with both 32-bit and 64-bit operands.
  // Note that these values are defined as
  // 32-bit operands only.
  if (isInlineValue()) {
    return true;
  }

  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
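
// For reference (informal; AMDGPUBaseInfo is authoritative): inline
// immediates cover the integers -16..64 and a small set of fp values such
// as 0.5, -0.5, 1.0, -1.0, 2.0, -2.0, 4.0, -4.0, plus 1/(2*pi) on targets
// where hasInv2PiInlineImm() holds.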

bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as a literal.
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals while preserving the same
      // semantics for VOP1/2/C and VOP3 because of integer truncation.
      // To avoid ambiguity, disable these cases.
      return false;
    }

    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // The low 32 bits of the literal will be zeroed out, but we accept
    // such literals.
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}

bool AMDGPUOperand::isRegClass(unsigned RCID) const {
  return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
}

bool AMDGPUOperand::isSDWAOperand(MVT type) const {
  if (AsmParser->isVI())
    return isVReg32();
  else if (AsmParser->isGFX9())
    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
  else
    return false;
}

bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}

bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}

bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}

bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}

uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
{
  assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  assert(Size == 2 || Size == 4 || Size == 8);

  const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));

  if (Imm.Mods.Abs) {
    Val &= ~FpSignMask;
  }
  if (Imm.Mods.Neg) {
    Val ^= FpSignMask;
  }

  return Val;
}
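
// Example (informal): for a 4-byte operand FpSignMask is 0x80000000, so
// abs clears the sign bit and neg toggles it; "-|x|" applies abs first and
// then neg, which always yields a set sign bit.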

void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
                             Inst.getNumOperands())) {
    addLiteralImmOperand(Inst, Imm.Val,
                         ApplyModifiers &
                         isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  } else {
    assert(!isImmTy(ImmTyNone) || !hasModifiers());
    Inst.addOperand(MCOperand::createImm(Imm.Val));
  }
}

void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check whether the low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");

    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to the operand's fp semantics.
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision loss but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
          OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
        ImmVal |= (ImmVal << 16);
      }

      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;

  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
    assert(AMDGPU::isInlinableLiteral16(LiteralVal,
                                        AsmParser->hasInv2PiInlineImm()));

    uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
                      static_cast<uint32_t>(LiteralVal);
    Inst.addOperand(MCOperand::createImm(ImmVal));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
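
// Packed-operand example (informal): for OPERAND_REG_INLINE_C_V2FP16 the
// 16-bit literal 0x3C00 (1.0h) is replicated into both halves, yielding
// the 32-bit encoding 0x3C003C00.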

template <unsigned Bitwidth>
void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
  APInt Literal(64, Imm.Val);

  if (!Imm.IsFPImm) {
    // We got int literal token.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
    return;
  }

  bool Lost;
  APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
  FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
                    APFloat::rmNearestTiesToEven, &Lost);
  Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
}

void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
  Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
}

static bool isInlineValue(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
    return true;
  default:
    return false;
  }
}

bool AMDGPUOperand::isInlineValue() const {
  return isRegKind() && ::isInlineValue(getReg());
}

//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//

static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::TTMP_32RegClassID;
      case 2: return AMDGPU::TTMP_64RegClassID;
      case 4: return AMDGPU::TTMP_128RegClassID;
      case 8: return AMDGPU::TTMP_256RegClassID;
      case 16: return AMDGPU::TTMP_512RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::SGPR_32RegClassID;
      case 2: return AMDGPU::SGPR_64RegClassID;
      case 4: return AMDGPU::SGPR_128RegClassID;
      case 8: return AMDGPU::SGPR_256RegClassID;
      case 16: return AMDGPU::SGPR_512RegClassID;
    }
  }
  return -1;
}
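
// Example (informal): getRegClass(IS_VGPR, 2) returns VReg_64RegClassID,
// the class matched by a register pair such as v[4:5]; unsupported widths
// (e.g. 5 dwords) return -1.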

static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("xnack_mask", AMDGPU::XNACK_MASK)
    .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("lds_direct", AMDGPU::LDS_DIRECT)
    .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
    .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  auto R = parseRegister();
  if (!R) return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}

bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
                                            RegisterKind RegKind, unsigned Reg1,
                                            unsigned RegNum) {
  switch (RegKind) {
  case IS_SPECIAL:
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
      Reg = AMDGPU::EXEC;
      RegWidth = 2;
      return true;
    }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
      Reg = AMDGPU::FLAT_SCR;
      RegWidth = 2;
      return true;
    }
    if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
      Reg = AMDGPU::XNACK_MASK;
      RegWidth = 2;
      return true;
    }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
      Reg = AMDGPU::VCC;
      RegWidth = 2;
      return true;
    }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
      Reg = AMDGPU::TBA;
      RegWidth = 2;
      return true;
    }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
      Reg = AMDGPU::TMA;
      RegWidth = 2;
      return true;
    }
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
    if (Reg1 != Reg + RegWidth) {
      return false;
    }
    RegWidth++;
    return true;
  default:
    llvm_unreachable("unexpected register kind");
  }
}
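
// Example (informal): while parsing the list [s2,s3], s2 is accepted first
// with RegWidth = 1; s3 == s2 + RegWidth then extends the group to s[2:3]
// by bumping RegWidth to 2. Similarly, vcc_lo followed by vcc_hi folds
// into vcc.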

bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
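
// Accepted register syntaxes (informal summary): special names ("vcc",
// "exec", "flat_scratch", ...), single registers ("v0", "s7", "ttmp2"),
// ranges ("v[8:11]", "s[4:5]", "v[3]") and lists of consecutive 32-bit
// registers ("[v0,v1,v2]").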
1872
Scott Linder1e8c2c72018-06-21 19:38:56 +00001873Optional<StringRef>
1874AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1875 switch (RegKind) {
1876 case IS_VGPR:
1877 return StringRef(".amdgcn.next_free_vgpr");
1878 case IS_SGPR:
1879 return StringRef(".amdgcn.next_free_sgpr");
1880 default:
1881 return None;
1882 }
1883}
1884
1885void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1886 auto SymbolName = getGprCountSymbolName(RegKind);
1887 assert(SymbolName && "initializing invalid register kind");
1888 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1889 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1890}
1891
1892bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
1893 unsigned DwordRegIndex,
1894 unsigned RegWidth) {
1895 // Symbols are only defined for GCN targets
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00001896 if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
Scott Linder1e8c2c72018-06-21 19:38:56 +00001897 return true;
1898
1899 auto SymbolName = getGprCountSymbolName(RegKind);
1900 if (!SymbolName)
1901 return true;
1902 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1903
1904 int64_t NewMax = DwordRegIndex + RegWidth - 1;
1905 int64_t OldCount;
1906
1907 if (!Sym->isVariable())
1908 return !Error(getParser().getTok().getLoc(),
1909 ".amdgcn.next_free_{v,s}gpr symbols must be variable");
1910 if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
1911 return !Error(
1912 getParser().getTok().getLoc(),
1913 ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");
1914
1915 if (OldCount <= NewMax)
1916 Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));
1917
1918 return true;
1919}
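
// Example (informal): after v[8:11] is parsed, DwordRegIndex is 8 and
// RegWidth is 4, so NewMax is 11 and .amdgcn.next_free_vgpr is raised to
// 12 unless it already exceeds that value.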

std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth, DwordRegIndex;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
    return nullptr;
  }
  if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
    if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
      return nullptr;
  } else
    KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
  return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
}

bool
AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
  if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
      (getLexer().getKind() == AsmToken::Integer ||
       getLexer().getKind() == AsmToken::Real)) {
    // This is a workaround for handling operands like these:
    //   |1.0|
    //   |-1|
    // This syntax is not compatible with the syntax of standard
    // MC expressions (due to the trailing '|').

    SMLoc EndLoc;
    const MCExpr *Expr;

    if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
      return true;
    }

    return !Expr->evaluateAsAbsolute(Val);
  }

  return getParser().parseAbsoluteExpression(Val);
}

OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();
    if (!NextToken.is(AsmToken::Integer) &&
        !NextToken.is(AsmToken::Real)) {
      return MatchOperand_NoMatch;
    }
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;

    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}
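
// Example (informal): "-1.5" lexes as Minus followed by a Real token; the
// literal is parsed into a 64-bit double bit pattern, its sign is flipped
// via changeSign(), and the result is stored as an fp immediate.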

OperandMatchResultTy
AMDGPUAsmParser::parseReg(OperandVector &Operands) {
  if (auto R = parseRegister()) {
    assert(R->isReg());
    R->Reg.IsForcedVOP3 = isForcedVOP3();
    Operands.push_back(std::move(R));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
  auto res = parseImm(Operands, AbsMod);
  if (res != MatchOperand_NoMatch) {
    return res;
  }

  return parseReg(Operands);
}
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Besides being counter-intuitive, such use of a floating-point NEG
    // modifier would result in different meanings of integer literals
    // used with VOP1/2/C and VOP3, for example:
    //    v_exp_f32_e32 v5, -1  // VOP1: src0 = 0xFFFFFFFF
    //    v_exp_f32_e64 v5, -1  // VOP3: src0 = 0x80000001
    // Negative fp literals should be handled likewise for uniformity.
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
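
// Accepted fp-modifier spellings (informal): "abs(v0)" and "|v0|" set
// Mods.Abs; "neg(v0)" and a leading "-" (when not followed by an integer
// or fp literal) set Mods.Neg; "-|v0|" combines both modifiers.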

OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
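
// Example (informal): "sext(v0)" parses v0 and sets Mods.Sext, the only
// integer input modifier handled here.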

OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, false);
}

OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}

OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
  std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
  if (Reg) {
    Operands.push_back(std::move(Reg));
    return MatchOperand_Success;
  }

  const AsmToken &Tok = Parser.getTok();
  if (Tok.getString() == "off") {
    Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
                                                AMDGPUOperand::ImmTyOff, false));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
    // FIXME: Produces an error without reporting the correct column.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
    const auto &Op = Inst.getOperand(OpNum);
    if (Op.getImm() != 0)
      return Match_InvalidOperand;
  }

  return Match_Success;
}

// What asm variants we should check
ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
  if (getForcedEncodingSize() == 32) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
    return makeArrayRef(Variants);
  }

  if (isForcedVOP3()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
    return makeArrayRef(Variants);
  }

  if (isForcedSDWA()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
                                        AMDGPUAsmVariants::SDWA9};
    return makeArrayRef(Variants);
  }

  if (isForcedDPP()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
    return makeArrayRef(Variants);
  }

  static const unsigned Variants[] = {
    AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
    AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
  };

  return makeArrayRef(Variants);
}

unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  const unsigned Num = Desc.getNumImplicitUses();
  for (unsigned i = 0; i < Num; ++i) {
    unsigned Reg = Desc.ImplicitUses[i];
    switch (Reg) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::VCC:
    case AMDGPU::M0:
      return Reg;
    default:
      break;
    }
  }
  return AMDGPU::NoRegister;
}

// NB: This code is correct only when used to check constant
// bus limitations because GFX7 supports no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}

bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
  const MCOperand &MO = Inst.getOperand(OpIdx);
  if (MO.isImm()) {
    return !isInlineConstant(Inst, OpIdx);
  }
  return !MO.isReg() ||
         isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
}
2337
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersection, such as
          // s0, s[0:1]
          // flat_scratch_lo, flat_scratch
          // flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but are disallowed anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}

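// Some VALU opcodes (e.g. the v_qsad_*/v_mqsad_* family) mark vdst as
// earlyclobber: the destination is written before all sources are consumed,
// so it must not overlap any source register. A hypothetical rejected form:
//   v_mqsad_u32_u8 v[0:3], v[0:1], v2, v[4:7]   ; vdst overlaps src0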
bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);

  const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
  if (DstIdx == -1 ||
      Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
    return true;
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  const MCOperand &Dst = Inst.getOperand(DstIdx);
  assert(Dst.isReg());
  const unsigned DstReg = mc2PseudoReg(Dst.getReg());

  const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };

  for (int SrcIdx : SrcIndices) {
    if (SrcIdx == -1) break;
    const MCOperand &Src = Inst.getOperand(SrcIdx);
    if (Src.isReg()) {
      const unsigned SrcReg = mc2PseudoReg(Src.getReg());
      if (isRegIntersect(DstReg, SrcReg, TRI)) {
        return false;
      }
    }
  }

  return true;
}

bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
    int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
    assert(ClampIdx != -1);
    return Inst.getOperand(ClampIdx).getImm() == 0;
  }

  return true;
}

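// Worked example, derived from the checks below (not from the ISA docs):
// dmask:0x7 selects 3 components, so vdata must cover 3 dwords, plus one
// more when tfe is set; packed d16 halves the component count (rounded up).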
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
  unsigned TFESize = Inst.getOperand(TFEIdx).getImm() ? 1 : 0;
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    DMask = 1;

  unsigned DataSize =
    (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
  if (hasPackedD16()) {
    int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
    if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
      DataSize = (DataSize + 1) / 2;
  }

  return (VDataSize / 4) == DataSize + TFESize;
}

bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;
  if (!Desc.mayLoad() || !Desc.mayStore())
    return true; // Not atomic

  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;

  // This is an incomplete check because image_atomic_cmpswap
  // may only use 0x3 and 0xf while other atomic operations
  // may use 0x1 and 0x3. However these limitations are
  // verified when we check that dmask matches dst size.
  return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
}

bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
    return true;

  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;

  // GATHER4 instructions use dmask in a different fashion compared to
  // other MIMG instructions. The only useful DMASK values are
  // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
  // (red,red,red,red) etc.) The ISA document doesn't mention
  // this.
  return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
}

bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
  if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
    if (isCI() || isSI())
      return false;
  }

  return true;
}

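// "rev" opcodes take their operands in swapped order: src0 and src1 are
// exchanged relative to the non-rev form, so e.g. v_subrev_f32 v0, v1, v2
// computes v2 - v1. (Example is illustrative, not from the source.)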
static bool IsRevOpcode(const unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_SUBREV_F32_e32:
  case AMDGPU::V_SUBREV_F32_e64:
  case AMDGPU::V_SUBREV_F32_e32_si:
  case AMDGPU::V_SUBREV_F32_e32_vi:
  case AMDGPU::V_SUBREV_F32_e64_si:
  case AMDGPU::V_SUBREV_F32_e64_vi:
  case AMDGPU::V_SUBREV_I32_e32:
  case AMDGPU::V_SUBREV_I32_e64:
  case AMDGPU::V_SUBREV_I32_e32_si:
  case AMDGPU::V_SUBREV_I32_e64_si:
  case AMDGPU::V_SUBBREV_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e32_si:
  case AMDGPU::V_SUBBREV_U32_e32_vi:
  case AMDGPU::V_SUBBREV_U32_e64_si:
  case AMDGPU::V_SUBBREV_U32_e64_vi:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_U32_e32_vi:
  case AMDGPU::V_SUBREV_U32_e64_gfx9:
  case AMDGPU::V_SUBREV_U32_e64_vi:
  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32_vi:
  case AMDGPU::V_SUBREV_F16_e64_vi:
  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32_vi:
  case AMDGPU::V_SUBREV_U16_e64_vi:
  case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:
  case AMDGPU::V_LSHLREV_B32_e32_si:
  case AMDGPU::V_LSHLREV_B32_e64_si:
  case AMDGPU::V_LSHLREV_B16_e32_vi:
  case AMDGPU::V_LSHLREV_B16_e64_vi:
  case AMDGPU::V_LSHLREV_B32_e32_vi:
  case AMDGPU::V_LSHLREV_B32_e64_vi:
  case AMDGPU::V_LSHLREV_B64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_si:
  case AMDGPU::V_LSHRREV_B32_e64_si:
  case AMDGPU::V_LSHRREV_B16_e32_vi:
  case AMDGPU::V_LSHRREV_B16_e64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_vi:
  case AMDGPU::V_LSHRREV_B32_e64_vi:
  case AMDGPU::V_LSHRREV_B64_vi:
  case AMDGPU::V_ASHRREV_I32_e64_si:
  case AMDGPU::V_ASHRREV_I32_e32_si:
  case AMDGPU::V_ASHRREV_I16_e32_vi:
  case AMDGPU::V_ASHRREV_I16_e64_vi:
  case AMDGPU::V_ASHRREV_I32_e32_vi:
  case AMDGPU::V_ASHRREV_I32_e64_vi:
  case AMDGPU::V_ASHRREV_I64_vi:
  case AMDGPU::V_PK_LSHLREV_B16_vi:
  case AMDGPU::V_PK_LSHRREV_B16_vi:
  case AMDGPU::V_PK_ASHRREV_I16_vi:
    return true;
  default:
    return false;
  }
}

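// Illustrative uses (syntax assumed, not taken from the source):
//   v_mov_b32 v0, lds_direct       ; lds_direct as src0 -> may be accepted
//   v_add_f32 v0, v1, lds_direct   ; lds_direct as src1 -> always rejected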
bool AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
  using namespace SIInstrFlags;
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);

  // The lds_direct register is defined so that it can be used only with
  // 9-bit source operands. Ignore encodings which do not accept these.
  if ((Desc.TSFlags & (VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA)) == 0)
    return true;

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  const int SrcIndices[] = { Src1Idx, Src2Idx };

  // lds_direct cannot be specified as either src1 or src2.
  for (int SrcIdx : SrcIndices) {
    if (SrcIdx == -1) break;
    const MCOperand &Src = Inst.getOperand(SrcIdx);
    if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
      return false;
    }
  }

  if (Src0Idx == -1)
    return true;

  const MCOperand &Src = Inst.getOperand(Src0Idx);
  if (!Src.isReg() || Src.getReg() != LDS_DIRECT)
    return true;

  // lds_direct is specified as src0. Check additional limitations.
  return (Desc.TSFlags & SIInstrFlags::SDWA) == 0 && !IsRevOpcode(Opcode);
}

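// SOP2/SOPC encodings have room for a single 32-bit literal dword, so two
// source operands may both be literals only if they share the same value.
// Illustrative: s_and_b32 s0, 0xAAAA, 0xAAAA is accepted, while
// s_and_b32 s0, 0xAAAA, 0xBBBB is not.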
bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
  unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
    return true;

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);

  const int OpIndices[] = { Src0Idx, Src1Idx };

  unsigned NumLiterals = 0;
  uint32_t LiteralValue;

  for (int OpIdx : OpIndices) {
    if (OpIdx == -1) break;

    const MCOperand &MO = Inst.getOperand(OpIdx);
    if (MO.isImm() &&
        // Exclude special imm operands (like that used by s_set_gpr_idx_on)
        AMDGPU::isSISrcOperand(Desc, OpIdx) &&
        !isInlineConstant(Inst, OpIdx)) {
      uint32_t Value = static_cast<uint32_t>(MO.getImm());
      if (NumLiterals == 0 || LiteralValue != Value) {
        LiteralValue = Value;
        ++NumLiterals;
      }
    }
  }

  return NumLiterals <= 1;
}

bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
                                          const SMLoc &IDLoc) {
  if (!validateLdsDirect(Inst)) {
    Error(IDLoc,
      "invalid use of lds_direct");
    return false;
  }
  if (!validateSOPLiteral(Inst)) {
    Error(IDLoc,
      "only one literal operand is allowed");
    return false;
  }
  if (!validateConstantBusLimitations(Inst)) {
    Error(IDLoc,
      "invalid operand (violates constant bus restrictions)");
    return false;
  }
  if (!validateEarlyClobberLimitations(Inst)) {
    Error(IDLoc,
      "destination must be different than all sources");
    return false;
  }
  if (!validateIntClampSupported(Inst)) {
    Error(IDLoc,
      "integer clamping is not supported on this GPU");
    return false;
  }
  // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
  if (!validateMIMGD16(Inst)) {
    Error(IDLoc,
      "d16 modifier is not supported on this GPU");
    return false;
  }
  if (!validateMIMGDataSize(Inst)) {
    Error(IDLoc,
      "image data size does not match dmask and tfe");
    return false;
  }
  if (!validateMIMGAtomicDMask(Inst)) {
    Error(IDLoc,
      "invalid atomic image dmask");
    return false;
  }
  if (!validateMIMGGatherDMask(Inst)) {
    Error(IDLoc,
      "invalid image_gather dmask: only one bit must be set");
    return false;
  }

  return true;
}

static std::string AMDGPUMnemonicSpellCheck(StringRef S,
                                            const FeatureBitset &FBS,
                                            unsigned VariantID = 0);

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // Match statuses are ordered from least to most specific; keep the most
    // specific status seen so far as the result:
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail && Result != Match_InvalidOperand
                                 && Result != Match_MissingFeature
                                 && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
  int64_t Tmp = -1;
  if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
    return true;
  }
  if (getParser().parseAbsoluteExpression(Tmp)) {
    return true;
  }
  Ret = static_cast<uint32_t>(Tmp);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  std::string Target;

  SMLoc TargetStart = getTok().getLoc();
  if (getParser().parseEscapedString(Target))
    return true;
  SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());

  std::string ExpectedTarget;
  raw_string_ostream ExpectedTargetOS(ExpectedTarget);
  IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);

  if (Target != ExpectedTargetOS.str())
    return getParser().Error(TargetRange.Start, "target must match options",
                             TargetRange);

  getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
  return false;
}

bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return getParser().Error(Range.Start, "value out of range", Range);
}

bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
    unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
    unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;
  unsigned MaxAddressableNumSGPRs = IsaInfo::getAddressableNumSGPRs(&getSTI());

  if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  NumSGPRs +=
      IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

  if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  if (Features.test(FeatureSGPRInitBug))
    NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  VGPRBlocks = IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}

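// A minimal, illustrative use of the directive parsed below (field values
// are made up; only the two *_next_free_* sub-directives are mandatory):
//   .amdhsa_kernel my_kernel
//     .amdhsa_next_free_vgpr 8
//     .amdhsa_next_free_sgpr 16
//   .end_amdhsa_kernel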
bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
    return TokError("directive only supported for amdhsa OS");

  StringRef KernelName;
  if (getParser().parseIdentifier(KernelName))
    return true;

  kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor();

  StringSet<> Seen;

  IsaVersion IVersion = getIsaVersion(getSTI().getCPU());

  SMRange VGPRRange;
  uint64_t NextFreeVGPR = 0;
  SMRange SGPRRange;
  uint64_t NextFreeSGPR = 0;
  unsigned UserSGPRCount = 0;
  bool ReserveVCC = true;
  bool ReserveFlatScr = true;
  bool ReserveXNACK = hasXNACK();

  while (true) {
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");

    StringRef ID = getTok().getIdentifier();
    SMRange IDRange = getTok().getLocRange();
    Lex();

    if (ID == ".end_amdhsa_kernel")
      break;

    if (Seen.find(ID) != Seen.end())
      return TokError(".amdhsa_ directives cannot be repeated");
    Seen.insert(ID);

    SMLoc ValStart = getTok().getLoc();
    int64_t IVal;
    if (getParser().parseAbsoluteExpression(IVal))
      return true;
    SMLoc ValEnd = getTok().getLoc();
    SMRange ValRange = SMRange(ValStart, ValEnd);

    if (IVal < 0)
      return OutOfRangeError(ValRange);

    uint64_t Val = IVal;

#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
  if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
    return OutOfRangeError(RANGE); \
  AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);

    if (ID == ".amdhsa_group_segment_fixed_size") {
      if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.group_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_private_segment_fixed_size") {
      if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.private_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
                       Val, ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
                       ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
                       ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
                       Val, ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
                       ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
                       ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
                       Val, ValRange);
      UserSGPRCount++;
    } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
          ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
                       ValRange);
    } else if (ID == ".amdhsa_next_free_vgpr") {
      VGPRRange = ValRange;
      NextFreeVGPR = Val;
    } else if (ID == ".amdhsa_next_free_sgpr") {
      SGPRRange = ValRange;
      NextFreeSGPR = Val;
    } else if (ID == ".amdhsa_reserve_vcc") {
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveVCC = Val;
    } else if (ID == ".amdhsa_reserve_flat_scratch") {
      if (IVersion.Major < 7)
        return getParser().Error(IDRange.Start, "directive requires gfx7+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveFlatScr = Val;
    } else if (ID == ".amdhsa_reserve_xnack_mask") {
      if (IVersion.Major < 8)
        return getParser().Error(IDRange.Start, "directive requires gfx8+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveXNACK = Val;
    } else if (ID == ".amdhsa_float_round_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_round_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
                       ValRange);
    } else if (ID == ".amdhsa_dx10_clamp") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
    } else if (ID == ".amdhsa_ieee_mode") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_fp16_overflow") {
      if (IVersion.Major < 9)
        return getParser().Error(IDRange.Start, "directive requires gfx9+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
                       ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_denorm_src") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_int_div_zero") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
                       Val, ValRange);
    } else {
      return getParser().Error(IDRange.Start,
                               "unknown .amdhsa_kernel directive", IDRange);
    }

#undef PARSE_BITS_ENTRY
  }

  if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
    return TokError(".amdhsa_next_free_vgpr directive is required");

  if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
    return TokError(".amdhsa_next_free_sgpr directive is required");

  unsigned VGPRBlocks;
  unsigned SGPRBlocks;
  if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
                         ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
                         SGPRRange, VGPRBlocks, SGPRBlocks))
    return true;

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
          VGPRBlocks))
    return OutOfRangeError(VGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
          SGPRBlocks))
    return OutOfRangeError(SGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
                  SGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
    return TokError("too many user SGPRs enabled");
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
                  UserSGPRCount);

  getTargetStreamer().EmitAmdhsaKernelDescriptor(
      getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
      ReserveFlatScr, ReserveXNACK);
  return false;
}

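// e.g. (illustrative): .hsa_code_object_version 2,1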
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

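// e.g. (illustrative): .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"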
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
  // assembly for backwards compatibility.
  if (ID == "max_scratch_backing_memory_byte_size") {
    Parser.eatToEndOfStatement();
    return false;
  }

  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());

  while (true) {
    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
    KernelScope.initialize(getContext());
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
    return Error(getParser().getTok().getLoc(),
                 ".amd_amdgpu_isa directive is not available on non-amdgcn "
                 "architectures");
  }

  auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();

  std::string ISAVersionStringFromSTI;
  raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
  IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);

  if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
    return Error(getParser().getTok().getLoc(),
                 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
                 "arguments specified through the command line");
  }

  getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
  const char *AssemblerDirectiveBegin;
  const char *AssemblerDirectiveEnd;
  std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
      AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())
          ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
                            HSAMD::V3::AssemblerDirectiveEnd)
          : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
                            HSAMD::AssemblerDirectiveEnd);

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
                 "not available on non-amdhsa OSes")).str());
  }

  std::string HSAMetadataString;
  raw_string_ostream YamlStream(HSAMetadataString);

  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    while (getLexer().is(AsmToken::Space)) {
      YamlStream << getLexer().getTok().getString();
      Lex();
    }

    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    YamlStream << Parser.parseStringToEndOfStatement()
               << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(Twine("expected directive ") +
                    Twine(AssemblerDirectiveEnd) + Twine(" not found"));
  }

  YamlStream.flush();

  if (IsaInfo::hasCodeObjectV3(&getSTI())) {
    if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
      return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
  } else {
    if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
      return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
  }

  return false;
}

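// e.g. (illustrative; the dwords are made-up metadata values):
//   .amd_amdgpu_pal_metadata 0x10000000, 42, 0x10000001, 7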
bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
  if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
                 "not available on non-amdpal OSes")).str());
  }

  PALMD::Metadata PALMetadata;
  for (;;) {
    uint32_t Value;
    if (ParseAsAbsoluteExpression(Value)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    PALMetadata.push_back(Value);
    if (getLexer().isNot(AsmToken::Comma))
      break;
    Lex();
  }
  getTargetStreamer().EmitPALMetadata(PALMetadata);
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
    if (IDVal == ".amdgcn_target")
      return ParseDirectiveAMDGCNTarget();

    if (IDVal == ".amdhsa_kernel")
      return ParseDirectiveAMDHSAKernel();

    // TODO: Restructure/combine with PAL metadata directive.
    if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
      return ParseDirectiveHSAMetadata();
  } else {
    if (IDVal == ".hsa_code_object_version")
      return ParseDirectiveHSACodeObjectVersion();

    if (IDVal == ".hsa_code_object_isa")
      return ParseDirectiveHSACodeObjectISA();

    if (IDVal == ".amd_kernel_code_t")
      return ParseDirectiveAMDKernelCodeT();

    if (IDVal == ".amdgpu_hsa_kernel")
      return ParseDirectiveAMDGPUHsaKernel();

    if (IDVal == ".amd_amdgpu_isa")
      return ParseDirectiveISAVersion();

    if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
      return ParseDirectiveHSAMetadata();
  }

  if (IDVal == PALMD::AssemblerDirective)
    return ParseDirectivePALMetadata();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return isGFX9();
  }

  switch (RegNo) {
  case AMDGPU::TBA:
  case AMDGPU::TBA_LO:
  case AMDGPU::TBA_HI:
  case AMDGPU::TMA:
  case AMDGPU::TMA_LO:
  case AMDGPU::TMA_HI:
    return !isGFX9();
  case AMDGPU::XNACK_MASK:
  case AMDGPU::XNACK_MASK_LO:
  case AMDGPU::XNACK_MASK_HI:
    return !isCI() && !isSI() && hasXNACK();
  default:
    break;
  }

  if (isInlineValue(RegNo))
    return !isCI() && !isSI() && !isVI();

  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}

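// e.g. "v_add_f32_e64" forces the 64-bit (VOP3) encoding and is stripped
// back to "v_add_f32"; the "_e32", "_dpp" and "_sdwa" suffixes work the
// same way for their respective encodings.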
StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);
  setForcedDPP(false);
  setForcedSDWA(false);

  if (Name.endswith("_e64")) {
    setForcedEncodingSize(64);
    return Name.substr(0, Name.size() - 4);
  } else if (Name.endswith("_e32")) {
    setForcedEncodingSize(32);
    return Name.substr(0, Name.size() - 4);
  } else if (Name.endswith("_dpp")) {
    setForcedDPP(true);
    return Name.substr(0, Name.size() - 4);
  } else if (Name.endswith("_sdwa")) {
    setForcedSDWA(true);
    return Name.substr(0, Name.size() - 5);
  }
  return Name;
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

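// parseIntWithPrefix parses operands of the form "<prefix>:<integer>",
// e.g. "offset:16", or (illustrative) "offset:-4" where a negative value
// is meaningful.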
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef Name = Parser.getTok().getString();
    if (!Name.equals(Prefix)) {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();

    bool IsMinus = false;
    if (getLexer().getKind() == AsmToken::Minus) {
      Parser.Lex();
      IsMinus = true;
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;

    if (IsMinus)
      Int = -Int;
    break;
  }
  }
  return MatchOperand_Success;
}

OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    AMDGPUOperand::ImmTy ImmTy,
                                    bool (*ConvertResult)(int64_t&)) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Value = 0;

  OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
  if (Res != MatchOperand_Success)
    return Res;

  if (ConvertResult && !ConvertResult(Value)) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
  return MatchOperand_Success;
}

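// Parses a small 0/1 array operand such as (illustrative) "op_sel:[0,1,1,0]";
// each element must be 0 or 1 and at most 4 elements are accepted.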
OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
  const char *Prefix,
  OperandVector &Operands,
  AMDGPUOperand::ImmTy ImmTy,
  bool (*ConvertResult)(int64_t&)) {
  StringRef Name = Parser.getTok().getString();
  if (!Name.equals(Prefix))
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  unsigned Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; I < 4; ++I) {
    if (I != 0) {
      if (getLexer().is(AsmToken::RBrac))
        break;

      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    int64_t Op;
    if (getParser().parseAbsoluteExpression(Op))
      return MatchOperand_ParseFail;

    if (Op != 0 && Op != 1)
      return MatchOperand_ParseFail;
    Val |= (Op << I);
  }

  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}

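// Parses a named flag bit, e.g. "gds" sets the bit and "nogds" clears it.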
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        if (Tok == "r128" && isGFX9())
          Error(S, "r128 modifier is not supported on this GPU");
        if (Tok == "a16" && !isGFX9())
          Error(S, "a16 modifier is not supported on this GPU");
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}

static void addOptionalImmOperand(
  MCInst& Inst, const OperandVector& Operands,
  AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
  AMDGPUOperand::ImmTy ImmT,
  int64_t Default = 0) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(Default));
  }
}

OperandMatchResultTy
AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
  if (getLexer().isNot(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }
  StringRef Tok = Parser.getTok().getString();
  if (Tok != Prefix) {
    return MatchOperand_NoMatch;
  }

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon)) {
    return MatchOperand_ParseFail;
  }

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Identifier)) {
    return MatchOperand_ParseFail;
  }

  Value = Parser.getTok().getString();
  return MatchOperand_Success;
}

// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
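// They may appear in either order, e.g. (illustrative) "dfmt:15 nfmt:2" or
// "nfmt:2 dfmt:15"; dfmt must be below 16, nfmt below 8, and the pair is
// packed as dfmt | (nfmt << 4).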
3741OperandMatchResultTy
3742AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
3743 SMLoc S = Parser.getTok().getLoc();
3744 int64_t Dfmt = 0, Nfmt = 0;
3745 // dfmt and nfmt can appear in either order, and each is optional.
3746 bool GotDfmt = false, GotNfmt = false;
3747 while (!GotDfmt || !GotNfmt) {
3748 if (!GotDfmt) {
3749 auto Res = parseIntWithPrefix("dfmt", Dfmt);
3750 if (Res != MatchOperand_NoMatch) {
3751 if (Res != MatchOperand_Success)
3752 return Res;
3753 if (Dfmt >= 16) {
3754 Error(Parser.getTok().getLoc(), "out of range dfmt");
3755 return MatchOperand_ParseFail;
3756 }
3757 GotDfmt = true;
3758 Parser.Lex();
3759 continue;
3760 }
3761 }
3762 if (!GotNfmt) {
3763 auto Res = parseIntWithPrefix("nfmt", Nfmt);
3764 if (Res != MatchOperand_NoMatch) {
3765 if (Res != MatchOperand_Success)
3766 return Res;
3767 if (Nfmt >= 8) {
3768 Error(Parser.getTok().getLoc(), "out of range nfmt");
3769 return MatchOperand_ParseFail;
3770 }
3771 GotNfmt = true;
3772 Parser.Lex();
3773 continue;
3774 }
3775 }
3776 break;
3777 }
3778 if (!GotDfmt && !GotNfmt)
3779 return MatchOperand_NoMatch;
3780 auto Format = Dfmt | Nfmt << 4;
3781 Operands.push_back(
3782 AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
3783 return MatchOperand_Success;
3784}
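
// For illustration, both orderings below are accepted and yield the same
// joint format immediate (Dfmt | Nfmt << 4); the mnemonic and register
// operands are placeholders:
//   tbuffer_load_format_x v0, off, s[0:3], dfmt:15, nfmt:2, 0
//   tbuffer_load_format_x v0, off, s[0:3], nfmt:2, dfmt:15, 0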

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
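
// Example (illustrative): "exp mrt0 v0, v1, v2, v3 done" writes all four
// components, so EnMask becomes 0xf. With "compr" the data is packed into
// two VGPRs and each written register enables a pair of components
// (0x3 << i * 2), as computed above.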

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

static bool
encodeCnt(
  const AMDGPU::IsaVersion ISA,
  int64_t &IntVal,
  int64_t CntVal,
  bool Saturate,
  unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
  unsigned (*decode)(const IsaVersion &Version, unsigned))
{
  bool Failed = false;

  IntVal = encode(ISA, IntVal, CntVal);
  if (CntVal != decode(ISA, IntVal)) {
    if (Saturate) {
      IntVal = encode(ISA, IntVal, -1);
    } else {
      Failed = true;
    }
  }
  return Failed;
}

bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  bool Failed = true;
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}

OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
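
// Examples of accepted forms (a counter name with the "_sat" suffix clamps
// an out-of-range value instead of failing):
//   s_waitcnt 0
//   s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
//   s_waitcnt vmcnt(1) & lgkmcnt_sat(100)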

bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    int Last = ID_SYMBOLIC_LAST_;
    if (isSI() || isCI() || isVI())
      Last = ID_SYMBOLIC_FIRST_GFX9_;
    for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}

OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return an error code; create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    int64_t Offset = OFFSET_DEFAULT_;
    int64_t Width = WIDTH_M1_DEFAULT_ + 1;
    if (parseHwregConstruct(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
      if (HwReg.IsSymbolic)
        Error(S, "invalid symbolic name of hardware register");
      else
        Error(S, "invalid code of hardware register: only 6-bit values are legal");
    }
    if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
    break;
  }
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
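
// Examples of accepted forms ("hwreg" takes a symbolic register name or a
// numeric id, optionally followed by a bit offset and a field width):
//   s_getreg_b32 s2, hwreg(HW_REG_GPR_ALLOC)
//   s_getreg_b32 s2, hwreg(HW_REG_GPR_ALLOC, 1, 3)
//   s_getreg_b32 s2, hwreg(5, 1, 3)   // raw 6-bit register id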

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}

bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}

OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  int Slot = StringSwitch<int>(Str)
    .Case("p10", 0)
    .Case("p20", 1)
    .Case("p0", 2)
    .Default(-1);

  SMLoc S = Parser.getTok().getLoc();
  if (Slot == -1)
    return MatchOperand_ParseFail;

  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
                                              AMDGPUOperand::ImmTyInterpSlot));
  return MatchOperand_Success;
}

OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
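
// Examples of accepted forms (the slot is one of "p10", "p20", "p0"; the
// attribute is "attr<N>.<chan>" with N in [0,63] and chan in {x,y,z,w}):
//   v_interp_p1_f32 v0, v1, attr0.x
//   v_interp_mov_f32 v0, p10, attr1.w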

void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}

OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
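
// The resulting target encodings are: mrt0..mrt7 -> 0..7, mrtz -> 8,
// null -> 9, pos0..pos3 -> 12..15, param0..param31 -> 32..63. For example,
// "exp pos1 ..." yields a target value of 13.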

OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
  uint8_t Val;
  StringRef Str = Parser.getTok().getString();

  auto Res = parseExpTgtImpl(Str, Val);
  if (Res != MatchOperand_Success)
    return Res;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();

  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
                                              AMDGPUOperand::ImmTyExpTgt));
  return MatchOperand_Success;
}

OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return an error code; create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
    break;
  }
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
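
// Examples of accepted forms (message and operation names come from
// SendMsg::IdSymbolic, OpGsSymbolic and OpSysSymbolic; raw immediates are
// also accepted):
//   s_sendmsg sendmsg(MSG_INTERRUPT)
//   s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)
//   s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_ECC_ERR_INTERRUPT)
//   s_sendmsg 0x12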

bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}

//===----------------------------------------------------------------------===//
// parser helpers
//===----------------------------------------------------------------------===//

bool
AMDGPUAsmParser::trySkipId(const StringRef Id) {
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == Id) {
    Parser.Lex();
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
  if (getLexer().getKind() == Kind) {
    Parser.Lex();
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
                           const StringRef ErrMsg) {
  if (!trySkipToken(Kind)) {
    Error(Parser.getTok().getLoc(), ErrMsg);
    return false;
  }
  return true;
}

bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}

bool
AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
  SMLoc S = Parser.getTok().getLoc();
  if (getLexer().getKind() == AsmToken::String) {
    Val = Parser.getTok().getStringContents();
    Parser.Lex();
    return true;
  } else {
    Error(S, ErrMsg);
    return false;
  }
}

//===----------------------------------------------------------------------===//
// swizzle
//===----------------------------------------------------------------------===//

LLVM_READNONE
static unsigned
encodeBitmaskPerm(const unsigned AndMask,
                  const unsigned OrMask,
                  const unsigned XorMask) {
  using namespace llvm::AMDGPU::Swizzle;

  return BITMASK_PERM_ENC |
         (AndMask << BITMASK_AND_SHIFT) |
         (OrMask << BITMASK_OR_SHIFT) |
         (XorMask << BITMASK_XOR_SHIFT);
}

bool
AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                                      const unsigned MinVal,
                                      const unsigned MaxVal,
                                      const StringRef ErrMsg) {
  for (unsigned i = 0; i < OpNum; ++i) {
    if (!skipToken(AsmToken::Comma, "expected a comma")) {
      return false;
    }
    SMLoc ExprLoc = Parser.getTok().getLoc();
    if (!parseExpr(Op[i])) {
      return false;
    }
    if (Op[i] < MinVal || Op[i] > MaxVal) {
      Error(ExprLoc, ErrMsg);
      return false;
    }
  }

  return true;
}

bool
AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  int64_t Lane[LANE_NUM];
  if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
                           "expected a 2-bit lane id")) {
    Imm = QUAD_PERM_ENC;
    for (unsigned I = 0; I < LANE_NUM; ++I) {
      Imm |= Lane[I] << (LANE_SHIFT * I);
    }
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;
  int64_t LaneIdx;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32,
                            "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }
  if (parseSwizzleOperands(1, &LaneIdx,
                           0, GroupSize - 1,
                           "lane id must be in the interval [0,group size - 1]")) {
    Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32, "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            1, 16, "group size must be in the interval [1,16]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
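
// For illustration, swizzle(BITMASK_PERM, "01pi0") reads the 5-character
// mask MSB first: '0'/'1' force the corresponding lane-id bit, 'p' preserves
// it, and 'i' inverts it. The string above encodes AndMask = 0b00110,
// OrMask = 0b01000, XorMask = 0b00010.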

bool
AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {

  SMLoc OffsetLoc = Parser.getTok().getLoc();

  if (!parseExpr(Imm)) {
    return false;
  }
  if (!isUInt<16>(Imm)) {
    Error(OffsetLoc, "expected a 16-bit offset");
    return false;
  }
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parenthesis")) {

    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parenthesis");
  }

  return false;
}

OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
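
// Examples of accepted forms:
//   ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
//   ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST, 8, 0)
//   ds_swizzle_b32 v8, v2 offset:swizzle(SWAP, 16)
//   ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM, "01pi0")
//   ds_swizzle_b32 v8, v2 offset:0xffff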

bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}

//===----------------------------------------------------------------------===//
// VGPR Index Mode
//===----------------------------------------------------------------------===//

int64_t AMDGPUAsmParser::parseGPRIdxMacro() {

  using namespace llvm::AMDGPU::VGPRIndexMode;

  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = Parser.getTok().getLoc();

    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      break;
    }

    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      break;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      break;
  }

  return Imm;
}

OperandMatchResultTy
AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {

  int64_t Imm = 0;
  SMLoc S = Parser.getTok().getLoc();

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "gpr_idx" &&
      getLexer().peekTok().is(AsmToken::LParen)) {

    Parser.Lex();
    Parser.Lex();

    // If parse failed, trigger an error but do not return error code
    // to avoid excessive error messages.
    Imm = parseGPRIdxMacro();

  } else {
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_NoMatch;
    if (Imm < 0 || !isUInt<4>(Imm)) {
      Error(S, "invalid immediate: only 4-bit values are legal");
    }
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode));
  return MatchOperand_Success;
}
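
// Examples of accepted forms (mode names come from VGPRIndexMode::IdSymbolic):
//   s_set_gpr_idx_on s0, gpr_idx(SRC0)
//   s_set_gpr_idx_on s0, gpr_idx(DST, SRC0, SRC1, SRC2)
//   s_set_gpr_idx_on s0, 15   // raw 4-bit mode mask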

bool AMDGPUOperand::isGPRIdxMode() const {
  return isImmTy(ImmTyGprIdxMode);
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(this,
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic,
                                   bool IsAtomicReturn,
                                   bool IsLds) {
  bool IsLdsOpcode = IsLds;
  bool HasLdsModifier = false;
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    HasLdsModifier = Op.isLDS();

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // This is a workaround for an LLVM quirk which may result in an
  // incorrect instruction selection. LDS and non-LDS versions of
  // MUBUF instructions are identical except that LDS versions
  // have a mandatory 'lds' modifier. However, this modifier follows
  // optional modifiers and the LLVM asm matcher regards this 'lds'
  // modifier as an optional one. As a result, an LDS version
  // of an opcode may be selected even if it has no 'lds' modifier.
  if (IsLdsOpcode && !HasLdsModifier) {
    int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
    if (NoLdsOpcode != -1) { // Got lds version - correct it.
      Inst.setOpcode(NoLdsOpcode);
      IsLdsOpcode = false;
    }
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);

  if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  }
}
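
// For illustration, the workaround above matters for forms like
//   buffer_load_dword off, s[8:11], s3 offset:4 lds
// where the mandatory 'lds' modifier follows the optional 'offset' modifier.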

void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  cvtMIMG(Inst, Operands, true);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset does not fit in 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}
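
// For illustration, "v_add_f32 v0, v1, v2 mul:2" encodes omod = 1, "mul:4"
// encodes omod = 2, and "div:2" encodes omod = 3, per the conversions above.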

static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  if (BoundCtrl == 0) {
    BoundCtrl = 1;
    return true;
  }

  if (BoundCtrl == -1) {
    BoundCtrl = 0;
    return true;
  }

  return false;
}

// Note: the order in this table matches the order of operands in AsmString.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"lds",     AMDGPUOperand::ImmTyLDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
  {"dfmt",    AMDGPUOperand::ImmTyFORMAT, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"d16",     AMDGPUOperand::ImmTyD16, true, nullptr},
  {"high",    AMDGPUOperand::ImmTyHigh, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128A16, true, nullptr},
  {"a16",     AMDGPUOperand::ImmTyR128A16, true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"d16",     AMDGPUOperand::ImmTyD16, true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"compr",   AMDGPUOperand::ImmTyExpCompr, true, nullptr},
  {"vm",      AMDGPUOperand::ImmTyExpVM, true, nullptr},
  {"op_sel",    AMDGPUOperand::ImmTyOpSel, false, nullptr},
  {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
  {"neg_lo",    AMDGPUOperand::ImmTyNegLo, false, nullptr},
  {"neg_hi",    AMDGPUOperand::ImmTyNegHi, false, nullptr}
};

OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  unsigned size = Operands.size();
  assert(size > 0);

  OperandMatchResultTy res = parseOptionalOpr(Operands);

  // This is a hack to enable hardcoded mandatory operands which follow
  // optional operands.
  //
  // Current design assumes that all operands after the first optional operand
  // are also optional. However, the implementation of some instructions
  // violates this rule (see e.g. flat/global atomics which have hardcoded
  // 'glc' operands).
  //
  // To alleviate this problem, we have to (implicitly) parse extra operands
  // to make sure the autogenerated parser of custom operands never hits
  // hardcoded mandatory operands.

  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {

    // We have parsed the first optional operand.
    // Parse as many operands as necessary to skip all mandatory operands.

    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
      if (res != MatchOperand_Success ||
          getLexer().is(AsmToken::EndOfStatement)) break;
      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
      res = parseOptionalOpr(Operands);
    }
  }

  return res;
}

OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT) {
      res = parseDfmtNfmt(Operands);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}

OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
  StringRef Name = Parser.getTok().getString();
  if (Name == "mul") {
    return parseIntWithPrefix("mul", Operands,
                              AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
  }

  if (Name == "div") {
    return parseIntWithPrefix("div", Operands,
                              AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
  }

  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}

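// Example (illustrative): for "v_cvt_pknorm_i16_f16 v5, v1, v2 op_sel:[0,0,1]"
// the final op_sel bit selects the high half of the destination. Since there
// is no separate dst op_sel field, it is folded into src0_modifiers as
// DST_OP_SEL above.
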
Sam Koltona3ec5c12016-10-07 14:46:06 +00005238static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
5239  // 1. This operand is an input modifier.
5240  return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
5241  // 2. This is not the last operand.
5242      && Desc.NumOperands > (OpNum + 1)
5243  // 3. The next operand is a register.
5244      && Desc.OpInfo[OpNum + 1].RegClass != -1
5245  // 4. The next operand is not tied to any other operand.
5246      && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
5247}
5248
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00005249void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst,
5250                                    const OperandVector &Operands) {
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005251 OptionalImmIndexMap OptionalIdx;
5252 unsigned Opc = Inst.getOpcode();
5253
5254 unsigned I = 1;
5255 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5256 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5257 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5258 }
5259
5260 for (unsigned E = Operands.size(); I != E; ++I) {
5261 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5262 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
5263 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
5264 } else if (Op.isInterpSlot() ||
5265 Op.isInterpAttr() ||
5266 Op.isAttrChan()) {
5267 Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
5268 } else if (Op.isImmModifier()) {
5269 OptionalIdx[Op.getImmTy()] = I;
5270 } else {
5271 llvm_unreachable("unhandled operand type");
5272 }
5273 }
5274
5275 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
5276 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
5277 }
5278
5279 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
5280 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
5281 }
5282
5283 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
5284 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
5285 }
5286}
5287
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005288void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
5289 OptionalImmIndexMap &OptionalIdx) {
5290 unsigned Opc = Inst.getOpcode();
5291
Tom Stellarda90b9522016-02-11 03:28:15 +00005292 unsigned I = 1;
5293 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00005294 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00005295 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00005296 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005297
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005298 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
5299 // This instruction has src modifiers
5300 for (unsigned E = Operands.size(); I != E; ++I) {
5301 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5302 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
5303 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
5304 } else if (Op.isImmModifier()) {
5305 OptionalIdx[Op.getImmTy()] = I;
5306 } else if (Op.isRegOrImm()) {
5307 Op.addRegOrImmOperands(Inst, 1);
5308 } else {
5309 llvm_unreachable("unhandled operand type");
5310 }
5311 }
5312 } else {
5313 // No src modifiers
5314 for (unsigned E = Operands.size(); I != E; ++I) {
5315 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5316 if (Op.isMod()) {
5317 OptionalIdx[Op.getImmTy()] = I;
5318 } else {
5319 Op.addRegOrImmOperands(Inst, 1);
5320 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005321 }
Tom Stellarda90b9522016-02-11 03:28:15 +00005322 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005323
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005324 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
5325 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
5326 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005327
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005328 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
5329 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
5330 }
Sam Koltona3ec5c12016-10-07 14:46:06 +00005331
Matt Arsenault0084adc2018-04-30 19:08:16 +00005332  // Special case for v_mac_{f16, f32} and v_fmac_f32 (gfx906):
Sam Koltona3ec5c12016-10-07 14:46:06 +00005333  // these have a src2 register operand that is tied to the dst operand.
5334  // The assembler does not allow modifiers for this operand, so
Matt Arsenault0084adc2018-04-30 19:08:16 +00005335  // src2_modifiers must be 0.
5336 if (Opc == AMDGPU::V_MAC_F32_e64_si ||
5337 Opc == AMDGPU::V_MAC_F32_e64_vi ||
5338 Opc == AMDGPU::V_MAC_F16_e64_vi ||
5339 Opc == AMDGPU::V_FMAC_F32_e64_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005340 auto it = Inst.begin();
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005341 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
Sam Koltona3ec5c12016-10-07 14:46:06 +00005342 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
5343 ++it;
5344 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
5345 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005346}
5347
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005348void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005349 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005350 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005351}
5352
Dmitry Preobrazhensky682a6542017-11-17 15:15:40 +00005353void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
5354 const OperandVector &Operands) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005355 OptionalImmIndexMap OptIdx;
Dmitry Preobrazhensky682a6542017-11-17 15:15:40 +00005356 const int Opc = Inst.getOpcode();
5357 const MCInstrDesc &Desc = MII.get(Opc);
5358
5359 const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
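  // IsPacked distinguishes true packed VOP3P opcodes from mad_mix-style
  // opcodes that share the encoding: only packed opcodes take neg_lo/neg_hi,
  // and their op_sel_hi defaults to all ones (see below).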
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005360
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005361 cvtVOP3(Inst, Operands, OptIdx);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005362
Matt Arsenaulte135c4c2017-09-20 20:53:49 +00005363 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
5364 assert(!IsPacked);
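    // vdst_in is tied to the destination, so replicate the dst operand.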
5365 Inst.addOperand(Inst.getOperand(0));
5366 }
5367
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005368  // FIXME: This is messy. Parse the modifiers as if it were a normal VOP3
5369  // instruction, and then figure out where to actually put the modifiers.
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005370
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005371 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005372
5373 int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
5374 if (OpSelHiIdx != -1) {
Matt Arsenaultc8f8cda2017-08-30 22:18:40 +00005375 int DefaultVal = IsPacked ? -1 : 0;
5376 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
5377 DefaultVal);
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005378 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005379
5380 int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
5381 if (NegLoIdx != -1) {
Matt Arsenaultc8f8cda2017-08-30 22:18:40 +00005382 assert(IsPacked);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005383 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
5384 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
5385 }
5386
5387 const int Ops[] = { AMDGPU::OpName::src0,
5388 AMDGPU::OpName::src1,
5389 AMDGPU::OpName::src2 };
5390 const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
5391 AMDGPU::OpName::src1_modifiers,
5392 AMDGPU::OpName::src2_modifiers };
5393
5394 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005395
5396 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005397 unsigned OpSelHi = 0;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005398 unsigned NegLo = 0;
5399 unsigned NegHi = 0;
5400
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00005401 if (OpSelHiIdx != -1) {
5402 OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
5403 }
5404
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005405 if (NegLoIdx != -1) {
5406 int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
5407 NegLo = Inst.getOperand(NegLoIdx).getImm();
5408 NegHi = Inst.getOperand(NegHiIdx).getImm();
5409 }
5410
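  // Fold the collected op_sel/op_sel_hi/neg_lo/neg_hi bits into the
  // per-source src_modifiers operands.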
5411 for (int J = 0; J < 3; ++J) {
5412 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
5413 if (OpIdx == -1)
5414 break;
5415
5416 uint32_t ModVal = 0;
5417
5418 if ((OpSel & (1 << J)) != 0)
5419 ModVal |= SISrcMods::OP_SEL_0;
5420
5421 if ((OpSelHi & (1 << J)) != 0)
5422 ModVal |= SISrcMods::OP_SEL_1;
5423
5424 if ((NegLo & (1 << J)) != 0)
5425 ModVal |= SISrcMods::NEG;
5426
5427 if ((NegHi & (1 << J)) != 0)
5428 ModVal |= SISrcMods::NEG_HI;
5429
5430 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
5431
Dmitry Preobrazhenskyb2d24e22017-07-07 14:29:06 +00005432 Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005433 }
5434}
5435
Sam Koltondfa29f72016-03-09 12:29:31 +00005436//===----------------------------------------------------------------------===//
5437// dpp
5438//===----------------------------------------------------------------------===//
5439
5440bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005441 using namespace AMDGPU::DPP;
5442
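  // dpp_ctrl is a 9-bit field; accept only immediates that fall into one of
  // the named encoding ranges below.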
Sam Koltondfa29f72016-03-09 12:29:31 +00005443 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
5444 if (result) {
5445 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005446 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
5447 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
5448 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
5449 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
5450 (Imm == DppCtrl::WAVE_SHL1) ||
5451 (Imm == DppCtrl::WAVE_ROL1) ||
5452 (Imm == DppCtrl::WAVE_SHR1) ||
5453 (Imm == DppCtrl::WAVE_ROR1) ||
5454 (Imm == DppCtrl::ROW_MIRROR) ||
5455 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
5456 (Imm == DppCtrl::BCAST15) ||
5457 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00005458 }
5459 return false;
5460}
5461
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00005462bool AMDGPUOperand::isS16Imm() const {
5463 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
5464}
5465
5466bool AMDGPUOperand::isU16Imm() const {
5467 return isImm() && isUInt<16>(getImm());
5468}
5469
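// Parses the dpp_ctrl operand, written e.g. as quad_perm:[0,1,2,3],
// row_shl:1, wave_ror:1, row_mirror or row_bcast:15.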
Alex Bradbury58eba092016-11-01 16:32:05 +00005470OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00005471AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005472 using namespace AMDGPU::DPP;
5473
Sam Koltondfa29f72016-03-09 12:29:31 +00005474 SMLoc S = Parser.getTok().getLoc();
5475 StringRef Prefix;
5476 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00005477
Sam Koltona74cd522016-03-18 15:35:51 +00005478 if (getLexer().getKind() == AsmToken::Identifier) {
5479 Prefix = Parser.getTok().getString();
5480 } else {
5481 return MatchOperand_NoMatch;
5482 }
5483
5484 if (Prefix == "row_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005485 Int = DppCtrl::ROW_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005486 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005487 } else if (Prefix == "row_half_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005488 Int = DppCtrl::ROW_HALF_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005489 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005490 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00005491    // Check to prevent parseDPPCtrl from eating invalid tokens.
5492 if (Prefix != "quad_perm"
5493 && Prefix != "row_shl"
5494 && Prefix != "row_shr"
5495 && Prefix != "row_ror"
5496 && Prefix != "wave_shl"
5497 && Prefix != "wave_rol"
5498 && Prefix != "wave_shr"
5499 && Prefix != "wave_ror"
5500 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00005501 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00005502 }
5503
Sam Koltona74cd522016-03-18 15:35:51 +00005504 Parser.Lex();
5505 if (getLexer().isNot(AsmToken::Colon))
5506 return MatchOperand_ParseFail;
5507
5508 if (Prefix == "quad_perm") {
5509 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00005510 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005511 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00005512 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005513 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00005514
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005515      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <= 3))
Sam Koltondfa29f72016-03-09 12:29:31 +00005516 return MatchOperand_ParseFail;
5517
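      // The first lane select occupies bits [1:0]; the remaining three are
      // packed into bits [3:2], [5:4] and [7:6].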
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005518 for (int i = 0; i < 3; ++i) {
5519 if (getLexer().isNot(AsmToken::Comma))
5520 return MatchOperand_ParseFail;
5521 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00005522
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005523 int64_t Temp;
5524        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <= 3))
5525          return MatchOperand_ParseFail;
5526        const int shift = i * 2 + 2;
5527 Int += (Temp << shift);
5528 }
Sam Koltona74cd522016-03-18 15:35:51 +00005529
Sam Koltona74cd522016-03-18 15:35:51 +00005530 if (getLexer().isNot(AsmToken::RBrac))
5531 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005532 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005533 } else {
5534 // sel:%d
5535 Parser.Lex();
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005536 if (getParser().parseAbsoluteExpression(Int))
Sam Koltona74cd522016-03-18 15:35:51 +00005537 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005538
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005539 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005540 Int |= DppCtrl::ROW_SHL0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005541 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005542 Int |= DppCtrl::ROW_SHR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005543 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005544 Int |= DppCtrl::ROW_ROR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005545 } else if (Prefix == "wave_shl" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005546 Int = DppCtrl::WAVE_SHL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005547 } else if (Prefix == "wave_rol" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005548 Int = DppCtrl::WAVE_ROL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005549 } else if (Prefix == "wave_shr" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005550 Int = DppCtrl::WAVE_SHR1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005551 } else if (Prefix == "wave_ror" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005552 Int = DppCtrl::WAVE_ROR1;
Sam Koltona74cd522016-03-18 15:35:51 +00005553 } else if (Prefix == "row_bcast") {
5554 if (Int == 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005555 Int = DppCtrl::BCAST15;
Sam Koltona74cd522016-03-18 15:35:51 +00005556 } else if (Int == 31) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005557 Int = DppCtrl::BCAST31;
Sam Kolton7a2a3232016-07-14 14:50:35 +00005558 } else {
5559 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005560 }
5561 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00005562 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005563 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005564 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005565 }
Sam Koltona74cd522016-03-18 15:35:51 +00005566
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005567 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
Sam Koltondfa29f72016-03-09 12:29:31 +00005568 return MatchOperand_Success;
5569}
5570
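// The default dpp row_mask/bank_mask is 0xf, i.e. all rows/banks enabled.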
Sam Kolton5f10a132016-05-06 11:31:17 +00005571AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005572 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005573}
5574
David Stuttard20ea21c2019-03-12 09:52:58 +00005575AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
5576 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
5577}
5578
Sam Kolton5f10a132016-05-06 11:31:17 +00005579AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005580 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005581}
5582
Sam Kolton5f10a132016-05-06 11:31:17 +00005583AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005584 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00005585}
5586
5587void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00005588 OptionalImmIndexMap OptionalIdx;
5589
5590 unsigned I = 1;
5591 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5592 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5593 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5594 }
5595
5596 for (unsigned E = Operands.size(); I != E; ++I) {
Valery Pykhtin3d9afa22018-11-30 14:21:56 +00005597 auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
5598 MCOI::TIED_TO);
5599 if (TiedTo != -1) {
5600 assert((unsigned)TiedTo < Inst.getNumOperands());
5601      // Handle the tied "old" or "src2" operand for MAC instructions.
5602 Inst.addOperand(Inst.getOperand(TiedTo));
5603 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005604 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5605 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00005606 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00005607      // VOP2b instructions (v_add_u32, v_sub_u32, ...) use the "vcc" token
Sam Koltone66365e2016-12-27 10:06:42 +00005608      // in dpp form. Skip it.
5609      continue;
5610    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00005611 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00005612 } else if (Op.isDPPCtrl()) {
5613 Op.addImmOperands(Inst, 1);
5614 } else if (Op.isImm()) {
5615 // Handle optional arguments
5616 OptionalIdx[Op.getImmTy()] = I;
5617 } else {
5618 llvm_unreachable("Invalid operand type");
5619 }
5620 }
5621
Sam Koltondfa29f72016-03-09 12:29:31 +00005622 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
5623 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
5624 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
5625}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00005626
Sam Kolton3025e7f2016-04-26 13:33:56 +00005627//===----------------------------------------------------------------------===//
5628// sdwa
5629//===----------------------------------------------------------------------===//
5630
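// Parses an sdwa selector such as dst_sel:DWORD or src0_sel:BYTE_0.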
Alex Bradbury58eba092016-11-01 16:32:05 +00005631OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00005632AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
5633 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005634 using namespace llvm::AMDGPU::SDWA;
5635
Sam Kolton3025e7f2016-04-26 13:33:56 +00005636 SMLoc S = Parser.getTok().getLoc();
5637 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005638 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00005639
Sam Kolton05ef1c92016-06-03 10:27:37 +00005640 res = parseStringWithPrefix(Prefix, Value);
5641 if (res != MatchOperand_Success) {
5642 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005643 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005644
Sam Kolton3025e7f2016-04-26 13:33:56 +00005645  int64_t Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005647 .Case("BYTE_0", SdwaSel::BYTE_0)
5648 .Case("BYTE_1", SdwaSel::BYTE_1)
5649 .Case("BYTE_2", SdwaSel::BYTE_2)
5650 .Case("BYTE_3", SdwaSel::BYTE_3)
5651 .Case("WORD_0", SdwaSel::WORD_0)
5652 .Case("WORD_1", SdwaSel::WORD_1)
5653 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005654 .Default(0xffffffff);
5655 Parser.Lex(); // eat last token
5656
5657 if (Int == 0xffffffff) {
5658 return MatchOperand_ParseFail;
5659 }
5660
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005661 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005662 return MatchOperand_Success;
5663}
5664
Alex Bradbury58eba092016-11-01 16:32:05 +00005665OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00005666AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005667 using namespace llvm::AMDGPU::SDWA;
5668
Sam Kolton3025e7f2016-04-26 13:33:56 +00005669 SMLoc S = Parser.getTok().getLoc();
5670 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005671 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005672
5673 res = parseStringWithPrefix("dst_unused", Value);
5674 if (res != MatchOperand_Success) {
5675 return res;
5676 }
5677
5678  int64_t Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005680 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
5681 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
5682 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005683 .Default(0xffffffff);
5684 Parser.Lex(); // eat last token
5685
5686 if (Int == 0xffffffff) {
5687 return MatchOperand_ParseFail;
5688 }
5689
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005690 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005691 return MatchOperand_Success;
5692}
5693
Sam Kolton945231a2016-06-10 09:57:59 +00005694void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00005695 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
Sam Kolton05ef1c92016-06-03 10:27:37 +00005696}
5697
Sam Kolton945231a2016-06-10 09:57:59 +00005698void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00005699 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
5700}
5701
Sam Koltonf7659d712017-05-23 10:08:55 +00005702void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
5703 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
5704}
5705
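// On VI, the sdwa form of VOPC writes vcc implicitly, so the explicit vcc
// destination operand is skipped; gfx9 encodes an explicit scalar
// destination instead.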
Sam Kolton5196b882016-07-01 09:59:21 +00005706void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
Sam Koltonf7659d712017-05-23 10:08:55 +00005707 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
Sam Kolton05ef1c92016-06-03 10:27:37 +00005708}
5709
5710void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Koltonf7659d712017-05-23 10:08:55 +00005711 uint64_t BasicInstType, bool skipVcc) {
Sam Kolton9dffada2017-01-17 15:26:02 +00005712 using namespace llvm::AMDGPU::SDWA;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00005713
Sam Kolton05ef1c92016-06-03 10:27:37 +00005714 OptionalImmIndexMap OptionalIdx;
Sam Koltonf7659d712017-05-23 10:08:55 +00005715 bool skippedVcc = false;
Sam Kolton05ef1c92016-06-03 10:27:37 +00005716
5717 unsigned I = 1;
5718 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5719 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5720 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5721 }
5722
5723 for (unsigned E = Operands.size(); I != E; ++I) {
5724 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Sam Koltonf7659d712017-05-23 10:08:55 +00005725 if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
5726      // In sdwa form, VOP2b instructions (v_add_u32, v_sub_u32, ...) use the "vcc" token as dst.
5727 // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
5728 // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
5729 // Skip VCC only if we didn't skip it on previous iteration.
5730 if (BasicInstType == SIInstrFlags::VOP2 &&
5731 (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
5732 skippedVcc = true;
5733 continue;
5734 } else if (BasicInstType == SIInstrFlags::VOPC &&
5735 Inst.getNumOperands() == 0) {
5736 skippedVcc = true;
5737 continue;
5738 }
5739 }
5740 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00005741 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Sam Kolton05ef1c92016-06-03 10:27:37 +00005742 } else if (Op.isImm()) {
5743 // Handle optional arguments
5744 OptionalIdx[Op.getImmTy()] = I;
5745 } else {
5746 llvm_unreachable("Invalid operand type");
5747 }
Sam Koltonf7659d712017-05-23 10:08:55 +00005748 skippedVcc = false;
Sam Kolton05ef1c92016-06-03 10:27:37 +00005749 }
5750
Sam Koltonf7659d712017-05-23 10:08:55 +00005751 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
5752 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
Sam Kolton549c89d2017-06-21 08:53:38 +00005753    // V_NOP_sdwa_vi/gfx9 has no optional sdwa arguments.
Sam Koltona3ec5c12016-10-07 14:46:06 +00005754 switch (BasicInstType) {
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005755 case SIInstrFlags::VOP1:
Sam Koltonf7659d712017-05-23 10:08:55 +00005756 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00005757 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00005758 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
5759 }
Sam Kolton9dffada2017-01-17 15:26:02 +00005760 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
5761 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
5762 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00005763 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005764
5765 case SIInstrFlags::VOP2:
Sam Koltonf7659d712017-05-23 10:08:55 +00005766 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00005767 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00005768 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
5769 }
Sam Kolton9dffada2017-01-17 15:26:02 +00005770 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
5771 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
5772 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
5773 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00005774 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005775
5776 case SIInstrFlags::VOPC:
Sam Kolton549c89d2017-06-21 08:53:38 +00005777 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton9dffada2017-01-17 15:26:02 +00005778 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
5779 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00005780 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005781
Sam Koltona3ec5c12016-10-07 14:46:06 +00005782 default:
5783 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
5784 }
Sam Kolton05ef1c92016-06-03 10:27:37 +00005785 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00005786
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00005787  // Special case for v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00005788  // these have a src2 register operand that is tied to the dst operand.
Sam Koltona568e3d2016-12-22 12:57:41 +00005789 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
5790 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005791 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00005792 std::advance(
Sam Koltonf7659d712017-05-23 10:08:55 +00005793 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00005794 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
Sam Kolton5196b882016-07-01 09:59:21 +00005795 }
Sam Kolton05ef1c92016-06-03 10:27:37 +00005796}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00005797
Tom Stellard45bb48e2015-06-13 03:28:10 +00005798/// Force static initialization.
5799extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00005800 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
5801 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00005802}
5803
5804#define GET_REGISTER_MATCHER
5805#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00005806#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00005807#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00005808
Sam Kolton11de3702016-05-24 12:38:33 +00005809// This function should be defined after the auto-generated include so that we
5810// have the MatchClassKind enum defined.
5811unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
5812 unsigned Kind) {
5813 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00005814  // But MatchInstructionImpl() expects to meet a token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00005815  // the operand. This method checks if we were given an immediate operand but
5816  // expected to get the corresponding token.
5817 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
5818 switch (Kind) {
5819 case MCK_addr64:
5820 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
5821 case MCK_gds:
5822 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005823 case MCK_lds:
5824 return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00005825 case MCK_glc:
5826 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
5827 case MCK_idxen:
5828 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
5829 case MCK_offen:
5830 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005831 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00005832 // When operands have expression values, they will return true for isToken,
5833 // because it is not possible to distinguish between a token and an
5834 // expression at parse time. MatchInstructionImpl() will always try to
5835 // match an operand as a token, when isToken returns true, and when the
5836 // name of the expression is not a valid token, the match will fail,
5837 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005838 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
5839 case MCK_SSrcF32:
5840 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00005841 case MCK_SoppBrTarget:
5842 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005843 case MCK_VReg32OrOff:
5844 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00005845 case MCK_InterpSlot:
5846 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
5847 case MCK_Attr:
5848 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
5849 case MCK_AttrChan:
5850 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005851 default:
5852 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00005853 }
5854}
David Stuttard20ea21c2019-03-12 09:52:58 +00005855
5856//===----------------------------------------------------------------------===//
5857// endpgm
5858//===----------------------------------------------------------------------===//
5859
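// s_endpgm may carry an optional 16-bit immediate, e.g. "s_endpgm 1"; if
// omitted, it defaults to 0.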
5860OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
5861 SMLoc S = Parser.getTok().getLoc();
5862 int64_t Imm = 0;
5863
5864 if (!parseExpr(Imm)) {
5865    // The operand is optional; if not present, default to 0.
5866 Imm = 0;
5867 }
5868
5869 if (!isUInt<16>(Imm)) {
5870 Error(S, "expected a 16-bit value");
5871 return MatchOperand_ParseFail;
5872 }
5873
5874 Operands.push_back(
5875 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
5876 return MatchOperand_Success;
5877}
5878
5879bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }