blob: 7fcfc0b29019cb5d5ea7afa71e18b1a47ddf6761 [file] [log] [blame]
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Tom Stellard45bb48e2015-06-13 03:28:10 +00006//
7//===----------------------------------------------------------------------===//
8
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00009#include "AMDGPU.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +000014#include "SIInstrInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000015#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000016#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000017#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000018#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000019#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000020#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000021#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000022#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000023#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000024#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000025#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000027#include "llvm/BinaryFormat/ELF.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000028#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000032#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000033#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCParser/MCAsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000036#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000038#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000042#include "llvm/MC/MCSymbol.h"
Konstantin Zhuravlyova63b0f92017-10-11 22:18:53 +000043#include "llvm/Support/AMDGPUMetadata.h"
Scott Linder1e8c2c72018-06-21 19:38:56 +000044#include "llvm/Support/AMDHSAKernelDescriptor.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000045#include "llvm/Support/Casting.h"
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000046#include "llvm/Support/Compiler.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000047#include "llvm/Support/ErrorHandling.h"
David Blaikie13e77db2018-03-23 23:58:25 +000048#include "llvm/Support/MachineValueType.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000049#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000050#include "llvm/Support/SMLoc.h"
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +000051#include "llvm/Support/TargetParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000052#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000053#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000054#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <cstring>
58#include <iterator>
59#include <map>
60#include <memory>
61#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000062
Tom Stellard45bb48e2015-06-13 03:28:10 +000063using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000064using namespace llvm::AMDGPU;
Scott Linder1e8c2c72018-06-21 19:38:56 +000065using namespace llvm::amdhsa;
Tom Stellard45bb48e2015-06-13 03:28:10 +000066
67namespace {
68
Sam Kolton1eeb11b2016-09-09 14:44:04 +000069class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000070
// Broad category a parsed register name falls into; refined later into a
// concrete register class.
enum RegisterKind {
  IS_UNKNOWN, // not yet classified / unrecognized
  IS_VGPR,    // vector general-purpose register
  IS_SGPR,    // scalar general-purpose register
  IS_TTMP,    // trap-handler temporary register
  IS_SPECIAL  // named special registers (vcc, exec, m0, ...)
};
72
Sam Kolton1eeb11b2016-09-09 14:44:04 +000073//===----------------------------------------------------------------------===//
74// Operand
75//===----------------------------------------------------------------------===//
76
Tom Stellard45bb48e2015-06-13 03:28:10 +000077class AMDGPUOperand : public MCParsedAsmOperand {
78 enum KindTy {
79 Token,
80 Immediate,
81 Register,
82 Expression
83 } Kind;
84
85 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000086 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000087
88public:
Matt Arsenaultf15da6c2017-02-03 20:49:51 +000089 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
Sam Kolton1eeb11b2016-09-09 14:44:04 +000090 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000091
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +000092 using Ptr = std::unique_ptr<AMDGPUOperand>;
Sam Kolton5f10a132016-05-06 11:31:17 +000093
Sam Kolton945231a2016-06-10 09:57:59 +000094 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000095 bool Abs = false;
96 bool Neg = false;
97 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000098
99 bool hasFPModifiers() const { return Abs || Neg; }
100 bool hasIntModifiers() const { return Sext; }
101 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
102
103 int64_t getFPModifiersOperand() const {
104 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000105 Operand |= Abs ? SISrcMods::ABS : 0u;
106 Operand |= Neg ? SISrcMods::NEG : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000107 return Operand;
108 }
109
110 int64_t getIntModifiersOperand() const {
111 int64_t Operand = 0;
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +0000112 Operand |= Sext ? SISrcMods::SEXT : 0u;
Sam Kolton945231a2016-06-10 09:57:59 +0000113 return Operand;
114 }
115
116 int64_t getModifiersOperand() const {
117 assert(!(hasFPModifiers() && hasIntModifiers())
118 && "fp and int modifiers should not be used simultaneously");
119 if (hasFPModifiers()) {
120 return getFPModifiersOperand();
121 } else if (hasIntModifiers()) {
122 return getIntModifiersOperand();
123 } else {
124 return 0;
125 }
126 }
127
128 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
129 };
130
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 enum ImmTy {
132 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000133 ImmTyGDS,
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000134 ImmTyLDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000135 ImmTyOffen,
136 ImmTyIdxen,
137 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000138 ImmTyOffset,
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000139 ImmTyInstOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000140 ImmTyOffset0,
141 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000142 ImmTyGLC,
143 ImmTySLC,
144 ImmTyTFE,
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000145 ImmTyD16,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000146 ImmTyClampSI,
147 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000148 ImmTyDppCtrl,
149 ImmTyDppRowMask,
150 ImmTyDppBankMask,
151 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000152 ImmTySdwaDstSel,
153 ImmTySdwaSrc0Sel,
154 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000155 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000156 ImmTyDMask,
157 ImmTyUNorm,
158 ImmTyDA,
Ryan Taylor1f334d02018-08-28 15:07:30 +0000159 ImmTyR128A16,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000160 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000161 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000162 ImmTyExpCompr,
163 ImmTyExpVM,
Tim Renouf35484c92018-08-21 11:06:05 +0000164 ImmTyFORMAT,
Artem Tamazovd6468662016-04-25 14:13:51 +0000165 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000166 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000167 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000168 ImmTyInterpSlot,
169 ImmTyInterpAttr,
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000170 ImmTyAttrChan,
171 ImmTyOpSel,
172 ImmTyOpSelHi,
173 ImmTyNegLo,
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000174 ImmTyNegHi,
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000175 ImmTySwizzle,
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +0000176 ImmTyGprIdxMode,
David Stuttard20ea21c2019-03-12 09:52:58 +0000177 ImmTyEndpgm,
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000178 ImmTyHigh
Tom Stellard45bb48e2015-06-13 03:28:10 +0000179 };
180
181 struct TokOp {
182 const char *Data;
183 unsigned Length;
184 };
185
186 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000187 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000188 ImmTy Type;
189 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000190 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000191 };
192
193 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000194 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000195 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000196 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000197 };
198
199 union {
200 TokOp Tok;
201 ImmOp Imm;
202 RegOp Reg;
203 const MCExpr *Expr;
204 };
205
Tom Stellard45bb48e2015-06-13 03:28:10 +0000206 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000207 if (Kind == Token)
208 return true;
209
210 if (Kind != Expression || !Expr)
211 return false;
212
213 // When parsing operands, we can't always tell if something was meant to be
214 // a token, like 'gds', or an expression that references a global variable.
215 // In this case, we assume the string is an expression, and if we need to
216 // interpret is a token, then we treat the symbol name as the token.
217 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000218 }
219
220 bool isImm() const override {
221 return Kind == Immediate;
222 }
223
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000224 bool isInlinableImm(MVT type) const;
225 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000226
Tom Stellard45bb48e2015-06-13 03:28:10 +0000227 bool isRegKind() const {
228 return Kind == Register;
229 }
230
231 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000232 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000233 }
234
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000235 bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
236 return isRegClass(RCID) || isInlinableImm(type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000237 }
238
Matt Arsenault4bd72362016-12-10 00:39:12 +0000239 bool isRegOrImmWithInt16InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000240 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000241 }
242
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000243 bool isRegOrImmWithInt32InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000244 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000245 }
246
247 bool isRegOrImmWithInt64InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000248 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000249 }
250
Matt Arsenault4bd72362016-12-10 00:39:12 +0000251 bool isRegOrImmWithFP16InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000252 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000253 }
254
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000255 bool isRegOrImmWithFP32InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000256 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000257 }
258
259 bool isRegOrImmWithFP64InputMods() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000260 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000261 }
262
Sam Kolton9772eb32017-01-11 11:46:30 +0000263 bool isVReg() const {
264 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
265 isRegClass(AMDGPU::VReg_64RegClassID) ||
266 isRegClass(AMDGPU::VReg_96RegClassID) ||
267 isRegClass(AMDGPU::VReg_128RegClassID) ||
268 isRegClass(AMDGPU::VReg_256RegClassID) ||
269 isRegClass(AMDGPU::VReg_512RegClassID);
270 }
271
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000272 bool isVReg32() const {
273 return isRegClass(AMDGPU::VGPR_32RegClassID);
274 }
275
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000276 bool isVReg32OrOff() const {
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +0000277 return isOff() || isVReg32();
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000278 }
279
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +0000280 bool isSDWAOperand(MVT type) const;
281 bool isSDWAFP16Operand() const;
282 bool isSDWAFP32Operand() const;
283 bool isSDWAInt16Operand() const;
284 bool isSDWAInt32Operand() const;
Sam Kolton549c89d2017-06-21 08:53:38 +0000285
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000286 bool isImmTy(ImmTy ImmT) const {
287 return isImm() && Imm.Type == ImmT;
288 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000289
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000290 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000291 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000292 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000293
Sam Kolton945231a2016-06-10 09:57:59 +0000294 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
295 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
296 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000297 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
298 bool isDA() const { return isImmTy(ImmTyDA); }
Ryan Taylor1f334d02018-08-28 15:07:30 +0000299 bool isR128A16() const { return isImmTy(ImmTyR128A16); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000300 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000301 bool isOff() const { return isImmTy(ImmTyOff); }
302 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000303 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
304 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000305 bool isOffen() const { return isImmTy(ImmTyOffen); }
306 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
307 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
308 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
Dmitry Preobrazhensky04bd1182019-03-20 17:13:58 +0000309 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000310 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Matt Arsenaultfd023142017-06-12 15:55:58 +0000311
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +0000312 bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
313 bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000314 bool isGDS() const { return isImmTy(ImmTyGDS); }
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +0000315 bool isLDS() const { return isImmTy(ImmTyLDS); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000316 bool isGLC() const { return isImmTy(ImmTyGLC); }
317 bool isSLC() const { return isImmTy(ImmTySLC); }
318 bool isTFE() const { return isImmTy(ImmTyTFE); }
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +0000319 bool isD16() const { return isImmTy(ImmTyD16); }
Tim Renouf35484c92018-08-21 11:06:05 +0000320 bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<8>(getImm()); }
Sam Kolton945231a2016-06-10 09:57:59 +0000321 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
322 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
323 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
324 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
325 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
326 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
327 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000328 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
329 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
330 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000331 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
332 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
333 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
334 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000335 bool isHigh() const { return isImmTy(ImmTyHigh); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000336
Sam Kolton945231a2016-06-10 09:57:59 +0000337 bool isMod() const {
338 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000339 }
340
341 bool isRegOrImm() const {
342 return isReg() || isImm();
343 }
344
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000345 bool isRegClass(unsigned RCID) const;
346
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +0000347 bool isInlineValue() const;
348
Sam Kolton9772eb32017-01-11 11:46:30 +0000349 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
350 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
351 }
352
Matt Arsenault4bd72362016-12-10 00:39:12 +0000353 bool isSCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000354 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000355 }
356
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000357 bool isSCSrcV2B16() const {
358 return isSCSrcB16();
359 }
360
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000361 bool isSCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000362 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000363 }
364
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000365 bool isSCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000366 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000367 }
368
Matt Arsenault4bd72362016-12-10 00:39:12 +0000369 bool isSCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000370 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000371 }
372
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000373 bool isSCSrcV2F16() const {
374 return isSCSrcF16();
375 }
376
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000377 bool isSCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000378 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000379 }
380
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000381 bool isSCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000382 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000383 }
384
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000385 bool isSSrcB32() const {
386 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
387 }
388
Matt Arsenault4bd72362016-12-10 00:39:12 +0000389 bool isSSrcB16() const {
390 return isSCSrcB16() || isLiteralImm(MVT::i16);
391 }
392
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000393 bool isSSrcV2B16() const {
394 llvm_unreachable("cannot happen");
395 return isSSrcB16();
396 }
397
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000398 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000399 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
400 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000401 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000402 }
403
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000404 bool isSSrcF32() const {
405 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000406 }
407
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000408 bool isSSrcF64() const {
409 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000410 }
411
Matt Arsenault4bd72362016-12-10 00:39:12 +0000412 bool isSSrcF16() const {
413 return isSCSrcB16() || isLiteralImm(MVT::f16);
414 }
415
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000416 bool isSSrcV2F16() const {
417 llvm_unreachable("cannot happen");
418 return isSSrcF16();
419 }
420
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +0000421 bool isSSrcOrLdsB32() const {
422 return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
423 isLiteralImm(MVT::i32) || isExpr();
424 }
425
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000426 bool isVCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000427 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000428 }
429
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000430 bool isVCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000431 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000432 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000433
Matt Arsenault4bd72362016-12-10 00:39:12 +0000434 bool isVCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000435 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000436 }
437
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000438 bool isVCSrcV2B16() const {
439 return isVCSrcB16();
440 }
441
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000442 bool isVCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000443 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000444 }
445
446 bool isVCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000447 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000448 }
449
Matt Arsenault4bd72362016-12-10 00:39:12 +0000450 bool isVCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000451 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000452 }
453
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000454 bool isVCSrcV2F16() const {
455 return isVCSrcF16();
456 }
457
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000458 bool isVSrcB32() const {
Dmitry Preobrazhensky32c6b5c2018-06-13 17:02:03 +0000459 return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000460 }
461
462 bool isVSrcB64() const {
463 return isVCSrcF64() || isLiteralImm(MVT::i64);
464 }
465
Matt Arsenault4bd72362016-12-10 00:39:12 +0000466 bool isVSrcB16() const {
467 return isVCSrcF16() || isLiteralImm(MVT::i16);
468 }
469
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000470 bool isVSrcV2B16() const {
471 llvm_unreachable("cannot happen");
472 return isVSrcB16();
473 }
474
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000475 bool isVSrcF32() const {
Dmitry Preobrazhensky32c6b5c2018-06-13 17:02:03 +0000476 return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000477 }
478
479 bool isVSrcF64() const {
480 return isVCSrcF64() || isLiteralImm(MVT::f64);
481 }
482
Matt Arsenault4bd72362016-12-10 00:39:12 +0000483 bool isVSrcF16() const {
484 return isVCSrcF16() || isLiteralImm(MVT::f16);
485 }
486
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000487 bool isVSrcV2F16() const {
488 llvm_unreachable("cannot happen");
489 return isVSrcF16();
490 }
491
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000492 bool isKImmFP32() const {
493 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000494 }
495
Matt Arsenault4bd72362016-12-10 00:39:12 +0000496 bool isKImmFP16() const {
497 return isLiteralImm(MVT::f16);
498 }
499
Tom Stellard45bb48e2015-06-13 03:28:10 +0000500 bool isMem() const override {
501 return false;
502 }
503
504 bool isExpr() const {
505 return Kind == Expression;
506 }
507
508 bool isSoppBrTarget() const {
509 return isExpr() || isImm();
510 }
511
Sam Kolton945231a2016-06-10 09:57:59 +0000512 bool isSWaitCnt() const;
513 bool isHwreg() const;
514 bool isSendMsg() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000515 bool isSwizzle() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000516 bool isSMRDOffset8() const;
517 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000518 bool isSMRDLiteralOffset() const;
519 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000520 bool isGPRIdxMode() const;
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +0000521 bool isS16Imm() const;
522 bool isU16Imm() const;
David Stuttard20ea21c2019-03-12 09:52:58 +0000523 bool isEndpgm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000524
Tom Stellard89049702016-06-15 02:54:14 +0000525 StringRef getExpressionAsToken() const {
526 assert(isExpr());
527 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
528 return S->getSymbol().getName();
529 }
530
Sam Kolton945231a2016-06-10 09:57:59 +0000531 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000532 assert(isToken());
533
534 if (Kind == Expression)
535 return getExpressionAsToken();
536
Sam Kolton945231a2016-06-10 09:57:59 +0000537 return StringRef(Tok.Data, Tok.Length);
538 }
539
540 int64_t getImm() const {
541 assert(isImm());
542 return Imm.Val;
543 }
544
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000545 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000546 assert(isImm());
547 return Imm.Type;
548 }
549
550 unsigned getReg() const override {
551 return Reg.RegNo;
552 }
553
Tom Stellard45bb48e2015-06-13 03:28:10 +0000554 SMLoc getStartLoc() const override {
555 return StartLoc;
556 }
557
Peter Collingbourne0da86302016-10-10 22:49:37 +0000558 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000559 return EndLoc;
560 }
561
Matt Arsenaultf7f59b52017-12-20 18:52:57 +0000562 SMRange getLocRange() const {
563 return SMRange(StartLoc, EndLoc);
564 }
565
Sam Kolton945231a2016-06-10 09:57:59 +0000566 Modifiers getModifiers() const {
567 assert(isRegKind() || isImmTy(ImmTyNone));
568 return isRegKind() ? Reg.Mods : Imm.Mods;
569 }
570
571 void setModifiers(Modifiers Mods) {
572 assert(isRegKind() || isImmTy(ImmTyNone));
573 if (isRegKind())
574 Reg.Mods = Mods;
575 else
576 Imm.Mods = Mods;
577 }
578
579 bool hasModifiers() const {
580 return getModifiers().hasModifiers();
581 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000582
Sam Kolton945231a2016-06-10 09:57:59 +0000583 bool hasFPModifiers() const {
584 return getModifiers().hasFPModifiers();
585 }
586
587 bool hasIntModifiers() const {
588 return getModifiers().hasIntModifiers();
589 }
590
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000591 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
592
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000593 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000594
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000595 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000596
Matt Arsenault4bd72362016-12-10 00:39:12 +0000597 template <unsigned Bitwidth>
598 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
599
600 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
601 addKImmFPOperands<16>(Inst, N);
602 }
603
604 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
605 addKImmFPOperands<32>(Inst, N);
606 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000607
608 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000609
610 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
611 if (isRegKind())
612 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000613 else if (isExpr())
614 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000615 else
616 addImmOperands(Inst, N);
617 }
618
619 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
620 Modifiers Mods = getModifiers();
621 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
622 if (isRegKind()) {
623 addRegOperands(Inst, N);
624 } else {
625 addImmOperands(Inst, N, false);
626 }
627 }
628
629 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
630 assert(!hasIntModifiers());
631 addRegOrImmWithInputModsOperands(Inst, N);
632 }
633
634 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
635 assert(!hasFPModifiers());
636 addRegOrImmWithInputModsOperands(Inst, N);
637 }
638
Sam Kolton9772eb32017-01-11 11:46:30 +0000639 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
640 Modifiers Mods = getModifiers();
641 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
642 assert(isRegKind());
643 addRegOperands(Inst, N);
644 }
645
646 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
647 assert(!hasIntModifiers());
648 addRegWithInputModsOperands(Inst, N);
649 }
650
651 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
652 assert(!hasFPModifiers());
653 addRegWithInputModsOperands(Inst, N);
654 }
655
Sam Kolton945231a2016-06-10 09:57:59 +0000656 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
657 if (isImm())
658 addImmOperands(Inst, N);
659 else {
660 assert(isExpr());
661 Inst.addOperand(MCOperand::createExpr(Expr));
662 }
663 }
664
  // Print a human-readable name for an immediate-operand type. Used only by
  // the debug printer (print() below); the emitted names are not parsed back.
  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyFORMAT: OS << "FORMAT"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128A16: OS << "R128A16"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyAttrChan: OS << "AttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
    case ImmTyHigh: OS << "High"; break;
    case ImmTyEndpgm:
      OS << "Endpgm";
      break;
    }
  }
718
  // Debug dump of this operand; output format depends on the operand kind.
  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      // Immediate type is only printed when it carries information.
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000739
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000740 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
741 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000742 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000743 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000744 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000745 Op->Imm.Val = Val;
746 Op->Imm.IsFPImm = IsFPImm;
747 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000748 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000749 Op->StartLoc = Loc;
750 Op->EndLoc = Loc;
751 return Op;
752 }
753
  // Build a token operand. The token references (does not copy) Str's
  // storage, so Str must outlive the operand.
  // NOTE(review): HasExplicitEncodingSize is currently unused in this body;
  // it is kept so existing call sites that pass it continue to compile.
  static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
                                        StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }
764
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000765 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
766 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000767 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000768 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000769 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000770 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000771 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000772 Op->Reg.IsForcedVOP3 = ForceVOP3;
773 Op->StartLoc = S;
774 Op->EndLoc = E;
775 return Op;
776 }
777
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000778 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
779 const class MCExpr *Expr, SMLoc S) {
780 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000781 Op->Expr = Expr;
782 Op->StartLoc = S;
783 Op->EndLoc = S;
784 return Op;
785 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000786};
787
Sam Kolton945231a2016-06-10 09:57:59 +0000788raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
789 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
790 return OS;
791}
792
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000793//===----------------------------------------------------------------------===//
794// AsmParser
795//===----------------------------------------------------------------------===//
796
Artem Tamazova01cce82016-12-27 16:00:11 +0000797// Holds info related to the current kernel, e.g. count of SGPRs used.
798// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
799// .amdgpu_hsa_kernel or at EOF.
800class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000801 int SgprIndexUnusedMin = -1;
802 int VgprIndexUnusedMin = -1;
803 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000804
805 void usesSgprAt(int i) {
806 if (i >= SgprIndexUnusedMin) {
807 SgprIndexUnusedMin = ++i;
808 if (Ctx) {
809 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
810 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
811 }
812 }
813 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000814
Artem Tamazova01cce82016-12-27 16:00:11 +0000815 void usesVgprAt(int i) {
816 if (i >= VgprIndexUnusedMin) {
817 VgprIndexUnusedMin = ++i;
818 if (Ctx) {
819 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
820 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
821 }
822 }
823 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000824
Artem Tamazova01cce82016-12-27 16:00:11 +0000825public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000826 KernelScopeInfo() = default;
827
Artem Tamazova01cce82016-12-27 16:00:11 +0000828 void initialize(MCContext &Context) {
829 Ctx = &Context;
830 usesSgprAt(SgprIndexUnusedMin = -1);
831 usesVgprAt(VgprIndexUnusedMin = -1);
832 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000833
Artem Tamazova01cce82016-12-27 16:00:11 +0000834 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
835 switch (RegKind) {
836 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
837 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
838 default: break;
839 }
840 }
841};
842
// Target assembly parser for AMDGPU. Handles target-specific directives
// (HSA/PAL metadata, kernel descriptors, amd_kernel_code_t), register
// parsing, operand parsing, and conversion of parsed operands into MCInsts.
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCAsmParser &Parser; // Underlying generic parser (lexer / stream access).

  // Number of extra operands parsed after the first optional operand.
  // This may be necessary to skip hardcoded mandatory operands.
  static const unsigned MAX_OPR_LOOKAHEAD = 8;

  // Encoding variant forced by a mnemonic suffix; 64 means VOP3
  // (see isForcedVOP3 below), 0 means no forced encoding.
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  // Per-kernel register usage tracking (used when code object v3 symbols
  // are not available — see the constructor).
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool OutOfRangeError(SMRange Range);
  /// Calculate VGPR/SGPR blocks required for given target, reserved
  /// registers, and user-specified NextFreeXGPR values.
  ///
  /// \param Features [in] Target features, used for bug corrections.
  /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
  /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
  /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
  /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
  /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
  /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
  /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
  /// \param VGPRBlocks [out] Result VGPR block count.
  /// \param SGPRBlocks [out] Result SGPR block count.
  bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed,
                          unsigned NextFreeVGPR, SMRange VGPRRange,
                          unsigned NextFreeSGPR, SMRange SGPRRange,
                          unsigned &VGPRBlocks, unsigned &SGPRBlocks);
  bool ParseDirectiveAMDGCNTarget();
  bool ParseDirectiveAMDHSAKernel();
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadataBegin();
  bool ParseDirectivePALMetadata();

  /// Common code to parse out a block of text (typically YAML) between start and
  /// end directives.
  bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
                           const char *AssemblerDirectiveEnd,
                           std::string &CollectString);

  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  bool isRegister();
  bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
  Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
  void initializeGprCountSymbol(RegisterKind RegKind);
  bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                             unsigned RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("southern-islands");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // Pre-define ISA version symbols so assembly source can reference them.
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
      MCContext &Ctx = getContext();
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      } else {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      // With code object v3, GPR usage is published through symbols;
      // otherwise fall back to the KernelScope tracker.
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }

  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  bool parseAbsoluteExpr(int64_t &Val, bool HasSP3AbsModifier = false);
  bool parseSP3NegModifier();
  OperandMatchResultTy parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
  OperandMatchResultTy parseDfmtNfmt(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Result of parsing one field of a sendmsg/hwreg construct: the numeric
  // Id and whether it was spelled symbolically in the source.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-match semantic checks on the assembled MCInst.
  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateSOPLiteral(const MCInst &Inst) const;
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst);
  bool validateMIMGD16(const MCInst &Inst);
  bool validateLdsDirect(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  // Small lexer helpers over the underlying MCAsmParser token stream.
  bool isId(const StringRef Id) const;
  bool isId(const AsmToken &Token, const StringRef Id) const;
  bool isToken(const AsmToken::TokenKind Kind) const;
  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  void peekTokens(MutableArrayRef<AsmToken> Tokens);
  AsmToken::TokenKind getTokenKind() const;
  bool parseExpr(int64_t &Imm);
  StringRef getTokenStr() const;
  AsmToken peekToken();
  AsmToken getToken() const;
  SMLoc getLoc() const;
  void lex();

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
  int64_t parseGPRIdxMacro();

  // MUBUF/MTBUF operand conversion; flags select atomic/return/LDS variants.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;

  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
               bool IsAtomic = false);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);

  OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
};
1221
// Table entry describing an optional instruction operand and how to parse it.
struct OptionalOperand {
  const char *Name;                // Operand keyword as written in assembly.
  AMDGPUOperand::ImmTy Type;       // Immediate type attached to the parsed value.
  bool IsBit;                      // True if this operand is a single-bit flag.
  bool (*ConvertResult)(int64_t&); // Optional post-parse value fixup; may be null.
};
1228
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001229} // end anonymous namespace
1230
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001231// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001232static const fltSemantics *getFltSemantics(unsigned Size) {
1233 switch (Size) {
1234 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001235 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001236 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001237 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001238 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001239 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001240 default:
1241 llvm_unreachable("unsupported fp type");
1242 }
1243}
1244
Matt Arsenault4bd72362016-12-10 00:39:12 +00001245static const fltSemantics *getFltSemantics(MVT VT) {
1246 return getFltSemantics(VT.getSizeInBits() / 8);
1247}
1248
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001249static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1250 switch (OperandType) {
1251 case AMDGPU::OPERAND_REG_IMM_INT32:
1252 case AMDGPU::OPERAND_REG_IMM_FP32:
1253 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1254 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1255 return &APFloat::IEEEsingle();
1256 case AMDGPU::OPERAND_REG_IMM_INT64:
1257 case AMDGPU::OPERAND_REG_IMM_FP64:
1258 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1259 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1260 return &APFloat::IEEEdouble();
1261 case AMDGPU::OPERAND_REG_IMM_INT16:
1262 case AMDGPU::OPERAND_REG_IMM_FP16:
1263 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1264 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1265 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1266 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1267 return &APFloat::IEEEhalf();
1268 default:
1269 llvm_unreachable("unsupported fp type");
1270 }
1271}
1272
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001273//===----------------------------------------------------------------------===//
1274// Operand
1275//===----------------------------------------------------------------------===//
1276
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001277static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1278 bool Lost;
1279
1280 // Convert literal to single precision
1281 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1282 APFloat::rmNearestTiesToEven,
1283 &Lost);
1284 // We allow precision lost but not overflow or underflow
1285 if (Status != APFloat::opOK &&
1286 Lost &&
1287 ((Status & APFloat::opOverflow) != 0 ||
1288 (Status & APFloat::opUnderflow) != 0)) {
1289 return false;
1290 }
1291
1292 return true;
1293}
1294
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001295static bool isSafeTruncation(int64_t Val, unsigned Size) {
1296 return isUIntN(Size, Val) || isIntN(Size, Val);
1297}
1298
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001299bool AMDGPUOperand::isInlinableImm(MVT type) const {
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001300
1301 // This is a hack to enable named inline values like
1302 // shared_base with both 32-bit and 64-bit operands.
1303 // Note that these values are defined as
1304 // 32-bit operands only.
1305 if (isInlineValue()) {
1306 return true;
1307 }
1308
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001309 if (!isImmTy(ImmTyNone)) {
1310 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
1311 return false;
1312 }
1313 // TODO: We should avoid using host float here. It would be better to
1314 // check the float bit values which is what a few other places do.
1315 // We've had bot failures before due to weird NaN support on mips hosts.
1316
1317 APInt Literal(64, Imm.Val);
1318
1319 if (Imm.IsFPImm) { // We got fp literal token
1320 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
Matt Arsenault26faed32016-12-05 22:26:17 +00001321 return AMDGPU::isInlinableLiteral64(Imm.Val,
1322 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001323 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001324
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001325 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001326 if (!canLosslesslyConvertToFPType(FPLiteral, type))
1327 return false;
1328
Sam Kolton9dffada2017-01-17 15:26:02 +00001329 if (type.getScalarSizeInBits() == 16) {
1330 return AMDGPU::isInlinableLiteral16(
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001331 static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
Sam Kolton9dffada2017-01-17 15:26:02 +00001332 AsmParser->hasInv2PiInlineImm());
1333 }
1334
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001335 // Check if single precision literal is inlinable
1336 return AMDGPU::isInlinableLiteral32(
1337 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
Matt Arsenault26faed32016-12-05 22:26:17 +00001338 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001339 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001340
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001341 // We got int literal token.
1342 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
Matt Arsenault26faed32016-12-05 22:26:17 +00001343 return AMDGPU::isInlinableLiteral64(Imm.Val,
1344 AsmParser->hasInv2PiInlineImm());
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001345 }
1346
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001347 if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) {
1348 return false;
1349 }
1350
Matt Arsenault4bd72362016-12-10 00:39:12 +00001351 if (type.getScalarSizeInBits() == 16) {
1352 return AMDGPU::isInlinableLiteral16(
1353 static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
1354 AsmParser->hasInv2PiInlineImm());
1355 }
1356
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001357 return AMDGPU::isInlinableLiteral32(
1358 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
Matt Arsenault26faed32016-12-05 22:26:17 +00001359 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001360}
1361
// Returns true if this operand can be encoded as a (non-inline) literal
// constant for an operand of the given machine value type.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isSafeTruncation(Imm.Val, Size);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept this literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower fp operand: acceptable only if the conversion does not
  // overflow/underflow (precision loss alone is allowed).
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1402
1403bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001404 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001405}
1406
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001407bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001408 if (AsmParser->isVI())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001409 return isVReg32();
Sam Kolton549c89d2017-06-21 08:53:38 +00001410 else if (AsmParser->isGFX9())
Dmitry Preobrazhensky79042312019-02-27 13:58:48 +00001411 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001412 else
1413 return false;
1414}
1415
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001416bool AMDGPUOperand::isSDWAFP16Operand() const {
1417 return isSDWAOperand(MVT::f16);
1418}
1419
// SDWA operand usable where an f32 value is expected.
bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}
1423
// SDWA operand usable where an i16 value is expected.
bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}
1427
// SDWA operand usable where an i32 value is expected.
bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}
1431
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001432uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1433{
1434 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1435 assert(Size == 2 || Size == 4 || Size == 8);
1436
1437 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1438
1439 if (Imm.Mods.Abs) {
1440 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001441 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001442 if (Imm.Mods.Neg) {
1443 Val ^= FpSignMask;
1444 }
1445
1446 return Val;
1447}
1448
1449void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001450 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1451 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001452 addLiteralImmOperand(Inst, Imm.Val,
1453 ApplyModifiers &
1454 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001455 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001456 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1457 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001458 }
1459}
1460
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001461void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001462 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1463 auto OpNum = Inst.getNumOperands();
1464 // Check that this operand accepts literals
1465 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1466
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001467 if (ApplyModifiers) {
1468 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1469 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1470 Val = applyInputFPModifiers(Val, Size);
1471 }
1472
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001473 APInt Literal(64, Val);
1474 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001475
1476 if (Imm.IsFPImm) { // We got fp literal token
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001477 switch (OpTy) {
1478 case AMDGPU::OPERAND_REG_IMM_INT64:
1479 case AMDGPU::OPERAND_REG_IMM_FP64:
1480 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001481 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
Matt Arsenault26faed32016-12-05 22:26:17 +00001482 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1483 AsmParser->hasInv2PiInlineImm())) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001484 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001485 return;
1486 }
1487
1488 // Non-inlineable
1489 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001490 // For fp operands we check if low 32 bits are zeros
1491 if (Literal.getLoBits(32) != 0) {
1492 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001493 "Can't encode literal as exact 64-bit floating-point operand. "
1494 "Low 32-bits will be set to zero");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001495 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001496
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001497 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001498 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001499 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001500
1501 // We don't allow fp literals in 64-bit integer instructions. It is
1502 // unclear how we should encode them. This case should be checked earlier
1503 // in predicate methods (isLiteralImm())
1504 llvm_unreachable("fp literal in 64-bit integer instruction.");
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001505
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001506 case AMDGPU::OPERAND_REG_IMM_INT32:
1507 case AMDGPU::OPERAND_REG_IMM_FP32:
1508 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1509 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1510 case AMDGPU::OPERAND_REG_IMM_INT16:
1511 case AMDGPU::OPERAND_REG_IMM_FP16:
1512 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1513 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1514 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1515 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001516 bool lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001517 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001518 // Convert literal to single precision
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001519 FPLiteral.convert(*getOpFltSemantics(OpTy),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001520 APFloat::rmNearestTiesToEven, &lost);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001521 // We allow precision lost but not overflow or underflow. This should be
1522 // checked earlier in isLiteralImm()
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001523
1524 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001525 Inst.addOperand(MCOperand::createImm(ImmVal));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001526 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001527 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001528 default:
1529 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001530 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001531
1532 return;
1533 }
1534
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001535 // We got int literal token.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001536 // Only sign extend inline immediates.
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001537 switch (OpTy) {
1538 case AMDGPU::OPERAND_REG_IMM_INT32:
1539 case AMDGPU::OPERAND_REG_IMM_FP32:
1540 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001541 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001542 if (isSafeTruncation(Val, 32) &&
Matt Arsenault4bd72362016-12-10 00:39:12 +00001543 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1544 AsmParser->hasInv2PiInlineImm())) {
1545 Inst.addOperand(MCOperand::createImm(Val));
1546 return;
1547 }
1548
1549 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1550 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001551
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001552 case AMDGPU::OPERAND_REG_IMM_INT64:
1553 case AMDGPU::OPERAND_REG_IMM_FP64:
1554 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001555 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001556 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001557 Inst.addOperand(MCOperand::createImm(Val));
1558 return;
1559 }
1560
1561 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1562 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001563
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001564 case AMDGPU::OPERAND_REG_IMM_INT16:
1565 case AMDGPU::OPERAND_REG_IMM_FP16:
1566 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001567 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001568 if (isSafeTruncation(Val, 16) &&
Matt Arsenault4bd72362016-12-10 00:39:12 +00001569 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1570 AsmParser->hasInv2PiInlineImm())) {
1571 Inst.addOperand(MCOperand::createImm(Val));
1572 return;
1573 }
1574
1575 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1576 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001577
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001578 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1579 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001580 assert(isSafeTruncation(Val, 16));
1581 assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001582 AsmParser->hasInv2PiInlineImm()));
Eugene Zelenko66203762017-01-21 00:53:49 +00001583
Dmitry Preobrazhenskyd6827ce2019-03-29 14:50:20 +00001584 Inst.addOperand(MCOperand::createImm(Val));
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001585 return;
1586 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001587 default:
1588 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001589 }
1590}
1591
Matt Arsenault4bd72362016-12-10 00:39:12 +00001592template <unsigned Bitwidth>
1593void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001594 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001595
1596 if (!Imm.IsFPImm) {
1597 // We got int literal token.
1598 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1599 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001600 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001601
1602 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001603 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001604 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1605 APFloat::rmNearestTiesToEven, &Lost);
1606 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001607}
1608
// Append this register operand, translated to the subtarget-specific MC
// register, to the instruction.
void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
  Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
}
1612
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001613static bool isInlineValue(unsigned Reg) {
1614 switch (Reg) {
1615 case AMDGPU::SRC_SHARED_BASE:
1616 case AMDGPU::SRC_SHARED_LIMIT:
1617 case AMDGPU::SRC_PRIVATE_BASE:
1618 case AMDGPU::SRC_PRIVATE_LIMIT:
1619 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
1620 return true;
1621 default:
1622 return false;
1623 }
1624}
1625
// True if this operand is a register operand naming one of the inline
// value registers (shared/private base & limit, pops_exiting_wave_id).
bool AMDGPUOperand::isInlineValue() const {
  return isRegKind() && ::isInlineValue(getReg());
}
1629
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001630//===----------------------------------------------------------------------===//
1631// AsmParser
1632//===----------------------------------------------------------------------===//
1633
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001634static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1635 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001636 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001637 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001638 case 1: return AMDGPU::VGPR_32RegClassID;
1639 case 2: return AMDGPU::VReg_64RegClassID;
1640 case 3: return AMDGPU::VReg_96RegClassID;
1641 case 4: return AMDGPU::VReg_128RegClassID;
1642 case 8: return AMDGPU::VReg_256RegClassID;
1643 case 16: return AMDGPU::VReg_512RegClassID;
1644 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001645 } else if (Is == IS_TTMP) {
1646 switch (RegWidth) {
1647 default: return -1;
1648 case 1: return AMDGPU::TTMP_32RegClassID;
1649 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001650 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001651 case 8: return AMDGPU::TTMP_256RegClassID;
1652 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001653 }
1654 } else if (Is == IS_SGPR) {
1655 switch (RegWidth) {
1656 default: return -1;
1657 case 1: return AMDGPU::SGPR_32RegClassID;
1658 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001659 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001660 case 8: return AMDGPU::SGPR_256RegClassID;
1661 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001662 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001663 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001664 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001665}
1666
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001667static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001668 return StringSwitch<unsigned>(RegName)
1669 .Case("exec", AMDGPU::EXEC)
1670 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001671 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001672 .Case("xnack_mask", AMDGPU::XNACK_MASK)
Dmitry Preobrazhensky137976f2019-03-20 15:40:52 +00001673 .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
1674 .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
1675 .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
1676 .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
1677 .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
1678 .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
1679 .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
1680 .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
1681 .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
1682 .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00001683 .Case("lds_direct", AMDGPU::LDS_DIRECT)
1684 .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001685 .Case("m0", AMDGPU::M0)
1686 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001687 .Case("tba", AMDGPU::TBA)
1688 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001689 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1690 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001691 .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
1692 .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001693 .Case("vcc_lo", AMDGPU::VCC_LO)
1694 .Case("vcc_hi", AMDGPU::VCC_HI)
1695 .Case("exec_lo", AMDGPU::EXEC_LO)
1696 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001697 .Case("tma_lo", AMDGPU::TMA_LO)
1698 .Case("tma_hi", AMDGPU::TMA_HI)
1699 .Case("tba_lo", AMDGPU::TBA_LO)
1700 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001701 .Default(0);
1702}
1703
Eugene Zelenko66203762017-01-21 00:53:49 +00001704bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1705 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001706 auto R = parseRegister();
1707 if (!R) return true;
1708 assert(R->isReg());
1709 RegNo = R->getReg();
1710 StartLoc = R->getStartLoc();
1711 EndLoc = R->getEndLoc();
1712 return false;
1713}
1714
Eugene Zelenko66203762017-01-21 00:53:49 +00001715bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1716 RegisterKind RegKind, unsigned Reg1,
1717 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001718 switch (RegKind) {
1719 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001720 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1721 Reg = AMDGPU::EXEC;
1722 RegWidth = 2;
1723 return true;
1724 }
1725 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1726 Reg = AMDGPU::FLAT_SCR;
1727 RegWidth = 2;
1728 return true;
1729 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001730 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1731 Reg = AMDGPU::XNACK_MASK;
1732 RegWidth = 2;
1733 return true;
1734 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001735 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1736 Reg = AMDGPU::VCC;
1737 RegWidth = 2;
1738 return true;
1739 }
1740 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1741 Reg = AMDGPU::TBA;
1742 RegWidth = 2;
1743 return true;
1744 }
1745 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1746 Reg = AMDGPU::TMA;
1747 RegWidth = 2;
1748 return true;
1749 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001750 return false;
1751 case IS_VGPR:
1752 case IS_SGPR:
1753 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001754 if (Reg1 != Reg + RegWidth) {
1755 return false;
1756 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001757 RegWidth++;
1758 return true;
1759 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001760 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001761 }
1762}
1763
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00001764static const StringRef Registers[] = {
1765 { "v" },
1766 { "s" },
1767 { "ttmp" },
1768};
1769
1770bool
1771AMDGPUAsmParser::isRegister(const AsmToken &Token,
1772 const AsmToken &NextToken) const {
1773
1774 // A list of consecutive registers: [s0,s1,s2,s3]
1775 if (Token.is(AsmToken::LBrac))
1776 return true;
1777
1778 if (!Token.is(AsmToken::Identifier))
1779 return false;
1780
1781 // A single register like s0 or a range of registers like s[0:1]
1782
1783 StringRef RegName = Token.getString();
1784
1785 for (StringRef Reg : Registers) {
1786 if (RegName.startswith(Reg)) {
1787 if (Reg.size() < RegName.size()) {
1788 unsigned RegNum;
1789 // A single register with an index: rXX
1790 if (!RegName.substr(Reg.size()).getAsInteger(10, RegNum))
1791 return true;
1792 } else {
1793 // A range of registers: r[XX:YY].
1794 if (NextToken.is(AsmToken::LBrac))
1795 return true;
1796 }
1797 }
1798 }
1799
1800 return getSpecialRegForName(RegName);
1801}
1802
// Convenience overload: test the current token with one-token lookahead.
bool
AMDGPUAsmParser::isRegister()
{
  return isRegister(getToken(), peekToken());
}
1808
Eugene Zelenko66203762017-01-21 00:53:49 +00001809bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1810 unsigned &RegNum, unsigned &RegWidth,
1811 unsigned *DwordRegIndex) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001812 if (DwordRegIndex) { *DwordRegIndex = 0; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001813 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1814 if (getLexer().is(AsmToken::Identifier)) {
1815 StringRef RegName = Parser.getTok().getString();
1816 if ((Reg = getSpecialRegForName(RegName))) {
1817 Parser.Lex();
1818 RegKind = IS_SPECIAL;
1819 } else {
1820 unsigned RegNumIndex = 0;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001821 if (RegName[0] == 'v') {
1822 RegNumIndex = 1;
1823 RegKind = IS_VGPR;
1824 } else if (RegName[0] == 's') {
1825 RegNumIndex = 1;
1826 RegKind = IS_SGPR;
1827 } else if (RegName.startswith("ttmp")) {
1828 RegNumIndex = strlen("ttmp");
1829 RegKind = IS_TTMP;
1830 } else {
1831 return false;
1832 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001833 if (RegName.size() > RegNumIndex) {
1834 // Single 32-bit register: vXX.
Artem Tamazovf88397c2016-06-03 14:41:17 +00001835 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1836 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001837 Parser.Lex();
1838 RegWidth = 1;
1839 } else {
Artem Tamazov7da9b822016-05-27 12:50:13 +00001840 // Range of registers: v[XX:YY]. ":YY" is optional.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001841 Parser.Lex();
1842 int64_t RegLo, RegHi;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001843 if (getLexer().isNot(AsmToken::LBrac))
1844 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001845 Parser.Lex();
1846
Artem Tamazovf88397c2016-06-03 14:41:17 +00001847 if (getParser().parseAbsoluteExpression(RegLo))
1848 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001849
Artem Tamazov7da9b822016-05-27 12:50:13 +00001850 const bool isRBrace = getLexer().is(AsmToken::RBrac);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001851 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1852 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001853 Parser.Lex();
1854
Artem Tamazov7da9b822016-05-27 12:50:13 +00001855 if (isRBrace) {
1856 RegHi = RegLo;
1857 } else {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001858 if (getParser().parseAbsoluteExpression(RegHi))
1859 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001860
Artem Tamazovf88397c2016-06-03 14:41:17 +00001861 if (getLexer().isNot(AsmToken::RBrac))
1862 return false;
Artem Tamazov7da9b822016-05-27 12:50:13 +00001863 Parser.Lex();
1864 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001865 RegNum = (unsigned) RegLo;
1866 RegWidth = (RegHi - RegLo) + 1;
1867 }
1868 }
1869 } else if (getLexer().is(AsmToken::LBrac)) {
1870 // List of consecutive registers: [s0,s1,s2,s3]
1871 Parser.Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00001872 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
Artem Tamazovf88397c2016-06-03 14:41:17 +00001873 return false;
1874 if (RegWidth != 1)
1875 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001876 RegisterKind RegKind1;
1877 unsigned Reg1, RegNum1, RegWidth1;
1878 do {
1879 if (getLexer().is(AsmToken::Comma)) {
1880 Parser.Lex();
1881 } else if (getLexer().is(AsmToken::RBrac)) {
1882 Parser.Lex();
1883 break;
Artem Tamazova01cce82016-12-27 16:00:11 +00001884 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001885 if (RegWidth1 != 1) {
1886 return false;
1887 }
1888 if (RegKind1 != RegKind) {
1889 return false;
1890 }
1891 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1892 return false;
1893 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001894 } else {
1895 return false;
1896 }
1897 } while (true);
1898 } else {
1899 return false;
1900 }
1901 switch (RegKind) {
1902 case IS_SPECIAL:
1903 RegNum = 0;
1904 RegWidth = 1;
1905 break;
1906 case IS_VGPR:
1907 case IS_SGPR:
1908 case IS_TTMP:
1909 {
1910 unsigned Size = 1;
1911 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001912 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001913 Size = std::min(RegWidth, 4u);
1914 }
Artem Tamazovf88397c2016-06-03 14:41:17 +00001915 if (RegNum % Size != 0)
1916 return false;
Artem Tamazova01cce82016-12-27 16:00:11 +00001917 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001918 RegNum = RegNum / Size;
1919 int RCID = getRegClass(RegKind, RegWidth);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001920 if (RCID == -1)
1921 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001922 const MCRegisterClass RC = TRI->getRegClass(RCID);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001923 if (RegNum >= RC.getNumRegs())
1924 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001925 Reg = RC.getRegister(RegNum);
1926 break;
1927 }
1928
1929 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001930 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001931 }
1932
Artem Tamazovf88397c2016-06-03 14:41:17 +00001933 if (!subtargetHasRegister(*TRI, Reg))
1934 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001935 return true;
1936}
1937
Scott Linder1e8c2c72018-06-21 19:38:56 +00001938Optional<StringRef>
1939AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1940 switch (RegKind) {
1941 case IS_VGPR:
1942 return StringRef(".amdgcn.next_free_vgpr");
1943 case IS_SGPR:
1944 return StringRef(".amdgcn.next_free_sgpr");
1945 default:
1946 return None;
1947 }
1948}
1949
1950void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1951 auto SymbolName = getGprCountSymbolName(RegKind);
1952 assert(SymbolName && "initializing invalid register kind");
1953 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1954 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1955}
1956
// Grow the .amdgcn.next_free_{v,s}gpr tracking symbol so it covers the
// just-parsed register range [DwordRegIndex, DwordRegIndex + RegWidth).
// Returns false (after emitting a diagnostic) if the symbol exists but
// is not an absolute variable; returns true otherwise, including when
// no tracking symbol applies to this register kind or target.
bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
                                            unsigned DwordRegIndex,
                                            unsigned RegWidth) {
  // Symbols are only defined for GCN targets
  if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
    return true;

  auto SymbolName = getGprCountSymbolName(RegKind);
  if (!SymbolName)
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);

  // Highest dword register index touched by this register.
  int64_t NewMax = DwordRegIndex + RegWidth - 1;
  int64_t OldCount;

  // Error() always returns true, so '!Error(...)' emits the diagnostic
  // and yields false to the caller.
  if (!Sym->isVariable())
    return !Error(getParser().getTok().getLoc(),
                  ".amdgcn.next_free_{v,s}gpr symbols must be variable");
  if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
    return !Error(
        getParser().getTok().getLoc(),
        ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");

  // The symbol holds one past the highest used index; only ever grow it.
  if (OldCount <= NewMax)
    Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));

  return true;
}
1985
// Parse a register operand starting at the current token.
// On success, updates GPR usage tracking (code-object-v3 count symbols
// or KernelScope) and returns the new operand; on failure, emits a
// generic diagnostic and returns nullptr.
std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth, DwordRegIndex;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
    //FIXME: improve error messages (bug 41303).
    Error(StartLoc, "not a valid operand.");
    return nullptr;
  }
  if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
    // Code object v3 tracks register usage via assembler symbols.
    if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
      return nullptr;
  } else
    KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
  return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
}
2005
// Parse an absolute (compile-time constant) integer expression into Val.
// Follows the MC parser convention: returns true on failure.
// When HasSP3AbsModifier is set, only a primary expression is parsed so
// the terminating '|' of the SP3 abs modifier is left unconsumed.
bool
AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool HasSP3AbsModifier) {
  if (HasSP3AbsModifier) {
    // This is a workaround for handling expressions
    // as arguments of SP3 'abs' modifier, for example:
    //     |1.0|
    //     |-1|
    //     |1+x|
    // This syntax is not compatible with syntax of standard
    // MC expressions (due to the trailing '|').

    SMLoc EndLoc;
    const MCExpr *Expr;
    SMLoc StartLoc = getLoc();

    if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
      return true;
    }

    if (!Expr->evaluateAsAbsolute(Val))
      return Error(StartLoc, "expected absolute expression");

    return false;
  }

  return getParser().parseAbsoluteExpression(Val);
}
2033
// Parse an immediate operand: either a floating-point literal with an
// optional leading '-', or an absolute integer expression.
// Returns NoMatch when the current token cannot start an immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool HasSP3AbsModifier) {
  // TODO: add syntactic sugar for 1/(2*PI)

  const auto& Tok = getToken();
  const auto& NextTok = peekToken();
  bool IsReal = Tok.is(AsmToken::Real);
  SMLoc S = Tok.getLoc();
  bool Negate = false;

  // A '-' immediately followed by a real literal is treated as a
  // negative fp literal; the sign token is consumed here.
  if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
    lex();
    IsReal = true;
    Negate = true;
  }

  if (IsReal) {
    // Floating-point expressions are not supported.
    // Can only allow floating-point literals with an
    // optional sign.

    StringRef Num = getTokenStr();
    lex();

    // Literals are stored as the bit pattern of an IEEE double.
    APFloat RealVal(APFloat::IEEEdouble());
    auto roundMode = APFloat::rmNearestTiesToEven;
    if (RealVal.convertFromString(Num, roundMode) == APFloat::opInvalidOp) {
      return MatchOperand_ParseFail;
    }
    if (Negate)
      RealVal.changeSign();

    Operands.push_back(
      AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
                               AMDGPUOperand::ImmTyNone, true));

    return MatchOperand_Success;

  // FIXME: Should enable arbitrary expressions here
  } else if (Tok.is(AsmToken::Integer) ||
             (Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Integer))){

    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, HasSP3AbsModifier))
      return MatchOperand_ParseFail;

    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2086
Alex Bradbury58eba092016-11-01 16:32:05 +00002087OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00002088AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002089 if (!isRegister())
2090 return MatchOperand_NoMatch;
2091
Sam Kolton1bdcef72016-05-23 09:59:02 +00002092 if (auto R = parseRegister()) {
2093 assert(R->isReg());
2094 R->Reg.IsForcedVOP3 = isForcedVOP3();
2095 Operands.push_back(std::move(R));
2096 return MatchOperand_Success;
2097 }
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002098 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002099}
2100
Alex Bradbury58eba092016-11-01 16:32:05 +00002101OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002102AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002103 auto res = parseReg(Operands);
2104 return (res == MatchOperand_NoMatch)?
2105 parseImm(Operands, AbsMod) :
2106 res;
Sam Kolton9772eb32017-01-11 11:46:30 +00002107}
2108
// Check if the current token is an SP3 'neg' modifier.
// Currently this modifier is allowed in the following context:
//
// 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]".
// 2. Before an 'abs' modifier: -abs(...)
// 3. Before an SP3 'abs' modifier: -|...|
//
// In all other cases "-" is handled as a part
// of an expression that follows the sign.
//
// Note: When "-" is followed by an integer literal,
// this is interpreted as integer negation rather
// than a floating-point NEG modifier applied to N.
// Besides being counter-intuitive, such use of a floating-point
// NEG modifier would have resulted in different meaning
// of integer literals used with VOP1/2/C and VOP3,
// for example:
//    v_exp_f32_e32 v5, -1  // VOP1: src0 = 0xFFFFFFFF
//    v_exp_f32_e64 v5, -1  // VOP3: src0 = 0x80000001
// Negative fp literals with preceding "-" are
// handled likewise for uniformity
//
bool
AMDGPUAsmParser::parseSP3NegModifier() {

  AsmToken NextToken[2];
  peekTokens(NextToken);

  // The '-' is a neg modifier only when directly followed by a register,
  // a '|' (SP3 abs) or the 'abs' keyword; consume it in that case.
  if (isToken(AsmToken::Minus) &&
      (isRegister(NextToken[0], NextToken[1]) ||
       NextToken[0].is(AsmToken::Pipe) ||
       isId(NextToken[0], "abs"))) {
    lex();
    return true;
  }

  return false;
}
2147
// Parse an operand together with optional floating-point input modifiers.
// Accepted spellings:
//   SP3 style:  -op, |op|, -|op|
//   VOP3 style: neg(op), abs(op), neg(abs(op))
// On success the modifier flags are attached to the operand just pushed
// onto Operands. When AllowImm is false, only registers are accepted.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Negate, Negate2 = false, Abs = false, Abs2 = false;

  // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
  if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) {
    Error(getLoc(), "invalid syntax, expected 'neg' modifier");
    return MatchOperand_ParseFail;
  }

  // SP3-style '-' modifier (consumed only when applicable).
  Negate = parseSP3NegModifier();

  // VOP3-style 'neg(...)'; mixing it with a preceding SP3 '-' is invalid.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // VOP3-style 'abs(...)'.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // SP3-style '|...|'; cannot be nested inside 'abs('.
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  // Once any modifier has been consumed, a missing operand is a hard error.
  if (Res != MatchOperand_Success) {
    return (Negate || Negate2 || Abs || Abs2)? MatchOperand_ParseFail : Res;
  }

  AMDGPUOperand::Modifiers Mods;
  // Consume the closing '|' / ')' of each opened modifier, in order.
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
2241
// Parse an operand with an optional integer 'sext(...)' input modifier.
// On success the modifier flag is attached to the operand just pushed
// onto Operands. When AllowImm is false, only registers are accepted.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = false;

  // Optional 'sext(' prefix.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  // After 'sext(' has been consumed, a missing operand is a hard error.
  if (Res != MatchOperand_Success) {
    return Sext? MatchOperand_ParseFail : Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Sext) {
    // Consume the closing ')'.
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00002285
Sam Kolton9772eb32017-01-11 11:46:30 +00002286OperandMatchResultTy
2287AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2288 return parseRegOrImmWithFPInputMods(Operands, false);
2289}
2290
2291OperandMatchResultTy
2292AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
2293 return parseRegOrImmWithIntInputMods(Operands, false);
2294}
2295
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002296OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002297 auto Loc = getLoc();
2298 if (trySkipId("off")) {
2299 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc,
2300 AMDGPUOperand::ImmTyOff, false));
2301 return MatchOperand_Success;
2302 }
2303
2304 if (!isRegister())
2305 return MatchOperand_NoMatch;
2306
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002307 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2308 if (Reg) {
2309 Operands.push_back(std::move(Reg));
2310 return MatchOperand_Success;
2311 }
2312
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00002313 return MatchOperand_ParseFail;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002314
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002315}
2316
// Target-specific predicate run by the generated matcher.
// Rejects instructions whose encoding conflicts with a forced mnemonic
// suffix (_e32/_e64/_dpp/_sdwa) and enforces a few per-opcode operand
// restrictions that TableGen cannot express.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // Forced encoding size/kind must agree with the candidate's encoding.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some VOP3 opcodes prefer their 32-bit form unless _e64 was forced.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  // Targets without flat-offset support require a zero offset operand.
  if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
    // FIXME: Produces error without correct column reported.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
    const auto &Op = Inst.getOperand(OpNum);
    if (Op.getImm() != 0)
      return Match_InvalidOperand;
  }

  return Match_Success;
}
2353
// What asm variants we should check when matching an instruction:
// a forced encoding (mnemonic suffix) narrows matching to the
// corresponding variant(s); otherwise every variant is tried.
// The returned ArrayRef views function-local static storage.
ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
  if (getForcedEncodingSize() == 32) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
    return makeArrayRef(Variants);
  }

  if (isForcedVOP3()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
    return makeArrayRef(Variants);
  }

  if (isForcedSDWA()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
                                        AMDGPUAsmVariants::SDWA9};
    return makeArrayRef(Variants);
  }

  if (isForcedDPP()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
    return makeArrayRef(Variants);
  }

  // No forced encoding: try all variants.
  static const unsigned Variants[] = {
    AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
    AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
  };

  return makeArrayRef(Variants);
}
2384
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002385unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2386 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2387 const unsigned Num = Desc.getNumImplicitUses();
2388 for (unsigned i = 0; i < Num; ++i) {
2389 unsigned Reg = Desc.ImplicitUses[i];
2390 switch (Reg) {
2391 case AMDGPU::FLAT_SCR:
2392 case AMDGPU::VCC:
2393 case AMDGPU::M0:
2394 return Reg;
2395 default:
2396 break;
2397 }
2398 }
2399 return AMDGPU::NoRegister;
2400}
2401
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 support no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
//
// Returns true if the immediate operand at OpIdx can be encoded as an
// inline constant (and therefore does not occupy the constant bus).
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  // Only SI source operands can hold inline constants.
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size in bytes
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    // Packed (v2i16/v2f16) operands follow different inline-constant
    // rules than scalar 16-bit operands.
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
2437
2438bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2439 const MCOperand &MO = Inst.getOperand(OpIdx);
2440 if (MO.isImm()) {
2441 return !isInlineConstant(Inst, OpIdx);
2442 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002443 return !MO.isReg() ||
2444 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002445}
2446
// Check that the instruction reads at most one distinct constant-bus
// value (SGPR, literal/expression, or an implicitly read special
// register such as VCC, M0 or FLAT_SCR).
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VOP-family encodings are subject to this limitation.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // Implicitly read special SGPRs also count.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersections like these
          //   s0, s[0:1]
          //   flat_scratch_lo, flat_scratch
          //   flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          // Reusing the same SGPR counts only once.
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2499
// For instructions whose vdst is marked earlyclobber, check that the
// destination register does not overlap (even partially) any source
// register.
bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);

  // Nothing to validate unless vdst exists and is earlyclobber.
  const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
  if (DstIdx == -1 ||
      Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
    return true;
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  assert(DstIdx != -1);
  const MCOperand &Dst = Inst.getOperand(DstIdx);
  assert(Dst.isReg());
  const unsigned DstReg = mc2PseudoReg(Dst.getReg());

  const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };

  for (int SrcIdx : SrcIndices) {
    if (SrcIdx == -1) break;
    const MCOperand &Src = Inst.getOperand(SrcIdx);
    if (Src.isReg()) {
      const unsigned SrcReg = mc2PseudoReg(Src.getReg());
      // A partial intersection (e.g. v0 vs v[0:1]) is also invalid.
      if (isRegIntersect(DstReg, SrcReg, TRI)) {
        return false;
      }
    }
  }

  return true;
}
2536
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002537bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2538
2539 const unsigned Opc = Inst.getOpcode();
2540 const MCInstrDesc &Desc = MII.get(Opc);
2541
2542 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2543 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2544 assert(ClampIdx != -1);
2545 return Inst.getOperand(ClampIdx).getImm() == 0;
2546 }
2547
2548 return true;
2549}
2550
// Check that the width of the MIMG vdata register matches the number of
// dwords implied by dmask (or 4 for gather4), plus one for tfe, with
// halving applied for packed d16 on targets that support it.
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
  unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
  // A zero dmask is treated as a dmask of 1.
  if (DMask == 0)
    DMask = 1;

  // Gather4 always returns 4 components regardless of dmask.
  unsigned DataSize =
    (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
  if (hasPackedD16()) {
    int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
    // Packed d16 stores two components per dword (rounded up).
    if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
      DataSize = (DataSize + 1) / 2;
  }

  // VDataSize is in bytes; compare in dwords.
  return (VDataSize / 4) == DataSize + TFESize;
}
2583
// Check the dmask of MIMG atomic instructions (those that both load and
// store): only 0x1, 0x3 and 0xf are acceptable here.
bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;
  if (!Desc.mayLoad() || !Desc.mayStore())
    return true; // Not atomic

  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;

  // This is an incomplete check because image_atomic_cmpswap
  // may only use 0x3 and 0xf while other atomic operations
  // may use 0x1 and 0x3. However these limitations are
  // verified when we check that dmask matches dst size.
  return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
}
2603
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002604bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2605
2606 const unsigned Opc = Inst.getOpcode();
2607 const MCInstrDesc &Desc = MII.get(Opc);
2608
2609 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2610 return true;
2611
2612 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2613 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2614
2615 // GATHER4 instructions use dmask in a different fashion compared to
2616 // other MIMG instructions. The only useful DMASK values are
2617 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2618 // (red,red,red,red) etc.) The ISA document doesn't mention
2619 // this.
2620 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2621}
2622
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002623bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2624
2625 const unsigned Opc = Inst.getOpcode();
2626 const MCInstrDesc &Desc = MII.get(Opc);
2627
2628 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2629 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002630
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002631 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2632 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2633 if (isCI() || isSI())
2634 return false;
2635 }
2636
2637 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002638}
2639
// Check whether Opcode is one of the "rev" opcode variants
// (V_SUBREV*/V_SUBBREV*/V_LSHLREV*/V_LSHRREV*/V_ASHRREV*/V_PK_*REV*),
// i.e. instructions whose source operands are reversed relative to the
// base opcode. Used by validateLdsDirect to restrict lds_direct usage.
static bool IsRevOpcode(const unsigned Opcode)
{
  switch (Opcode) {
  case AMDGPU::V_SUBREV_F32_e32:
  case AMDGPU::V_SUBREV_F32_e64:
  case AMDGPU::V_SUBREV_F32_e32_si:
  case AMDGPU::V_SUBREV_F32_e32_vi:
  case AMDGPU::V_SUBREV_F32_e64_si:
  case AMDGPU::V_SUBREV_F32_e64_vi:
  case AMDGPU::V_SUBREV_I32_e32:
  case AMDGPU::V_SUBREV_I32_e64:
  case AMDGPU::V_SUBREV_I32_e32_si:
  case AMDGPU::V_SUBREV_I32_e64_si:
  case AMDGPU::V_SUBBREV_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e32_si:
  case AMDGPU::V_SUBBREV_U32_e32_vi:
  case AMDGPU::V_SUBBREV_U32_e64_si:
  case AMDGPU::V_SUBBREV_U32_e64_vi:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_U32_e32_vi:
  case AMDGPU::V_SUBREV_U32_e64_gfx9:
  case AMDGPU::V_SUBREV_U32_e64_vi:
  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32_vi:
  case AMDGPU::V_SUBREV_F16_e64_vi:
  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32_vi:
  case AMDGPU::V_SUBREV_U16_e64_vi:
  case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:
  case AMDGPU::V_LSHLREV_B32_e32_si:
  case AMDGPU::V_LSHLREV_B32_e64_si:
  case AMDGPU::V_LSHLREV_B16_e32_vi:
  case AMDGPU::V_LSHLREV_B16_e64_vi:
  case AMDGPU::V_LSHLREV_B32_e32_vi:
  case AMDGPU::V_LSHLREV_B32_e64_vi:
  case AMDGPU::V_LSHLREV_B64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_si:
  case AMDGPU::V_LSHRREV_B32_e64_si:
  case AMDGPU::V_LSHRREV_B16_e32_vi:
  case AMDGPU::V_LSHRREV_B16_e64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_vi:
  case AMDGPU::V_LSHRREV_B32_e64_vi:
  case AMDGPU::V_LSHRREV_B64_vi:
  case AMDGPU::V_ASHRREV_I32_e64_si:
  case AMDGPU::V_ASHRREV_I32_e32_si:
  case AMDGPU::V_ASHRREV_I16_e32_vi:
  case AMDGPU::V_ASHRREV_I16_e64_vi:
  case AMDGPU::V_ASHRREV_I32_e32_vi:
  case AMDGPU::V_ASHRREV_I32_e64_vi:
  case AMDGPU::V_ASHRREV_I64_vi:
  case AMDGPU::V_PK_LSHLREV_B16_vi:
  case AMDGPU::V_PK_LSHRREV_B16_vi:
  case AMDGPU::V_PK_ASHRREV_I16_vi:
    return true;
  default:
    return false;
  }
}
2706
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002707bool AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
2708
2709 using namespace SIInstrFlags;
2710 const unsigned Opcode = Inst.getOpcode();
2711 const MCInstrDesc &Desc = MII.get(Opcode);
2712
2713 // lds_direct register is defined so that it can be used
2714 // with 9-bit operands only. Ignore encodings which do not accept these.
2715 if ((Desc.TSFlags & (VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA)) == 0)
2716 return true;
2717
2718 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2719 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2720 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2721
2722 const int SrcIndices[] = { Src1Idx, Src2Idx };
2723
2724 // lds_direct cannot be specified as either src1 or src2.
2725 for (int SrcIdx : SrcIndices) {
2726 if (SrcIdx == -1) break;
2727 const MCOperand &Src = Inst.getOperand(SrcIdx);
2728 if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
2729 return false;
2730 }
2731 }
2732
2733 if (Src0Idx == -1)
2734 return true;
2735
2736 const MCOperand &Src = Inst.getOperand(Src0Idx);
2737 if (!Src.isReg() || Src.getReg() != LDS_DIRECT)
2738 return true;
2739
2740 // lds_direct is specified as src0. Check additional limitations.
Dmitry Preobrazhensky6023d592019-03-04 12:48:32 +00002741 return (Desc.TSFlags & SIInstrFlags::SDWA) == 0 && !IsRevOpcode(Opcode);
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002742}
2743
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00002744bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
2745 unsigned Opcode = Inst.getOpcode();
2746 const MCInstrDesc &Desc = MII.get(Opcode);
2747 if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
2748 return true;
2749
2750 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2751 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2752
2753 const int OpIndices[] = { Src0Idx, Src1Idx };
2754
2755 unsigned NumLiterals = 0;
2756 uint32_t LiteralValue;
2757
2758 for (int OpIdx : OpIndices) {
2759 if (OpIdx == -1) break;
2760
2761 const MCOperand &MO = Inst.getOperand(OpIdx);
2762 if (MO.isImm() &&
2763 // Exclude special imm operands (like that used by s_set_gpr_idx_on)
2764 AMDGPU::isSISrcOperand(Desc, OpIdx) &&
2765 !isInlineConstant(Inst, OpIdx)) {
2766 uint32_t Value = static_cast<uint32_t>(MO.getImm());
2767 if (NumLiterals == 0 || LiteralValue != Value) {
2768 LiteralValue = Value;
2769 ++NumLiterals;
2770 }
2771 }
2772 }
2773
2774 return NumLiterals <= 1;
2775}
2776
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002777bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2778 const SMLoc &IDLoc) {
Dmitry Preobrazhensky942c2732019-02-08 14:57:37 +00002779 if (!validateLdsDirect(Inst)) {
2780 Error(IDLoc,
2781 "invalid use of lds_direct");
2782 return false;
2783 }
Dmitry Preobrazhensky61105ba2019-01-18 13:57:43 +00002784 if (!validateSOPLiteral(Inst)) {
2785 Error(IDLoc,
2786 "only one literal operand is allowed");
2787 return false;
2788 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002789 if (!validateConstantBusLimitations(Inst)) {
2790 Error(IDLoc,
2791 "invalid operand (violates constant bus restrictions)");
2792 return false;
2793 }
2794 if (!validateEarlyClobberLimitations(Inst)) {
2795 Error(IDLoc,
2796 "destination must be different than all sources");
2797 return false;
2798 }
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002799 if (!validateIntClampSupported(Inst)) {
2800 Error(IDLoc,
2801 "integer clamping is not supported on this GPU");
2802 return false;
2803 }
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002804 // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
2805 if (!validateMIMGD16(Inst)) {
2806 Error(IDLoc,
2807 "d16 modifier is not supported on this GPU");
2808 return false;
2809 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002810 if (!validateMIMGDataSize(Inst)) {
2811 Error(IDLoc,
2812 "image data size does not match dmask and tfe");
2813 return false;
2814 }
2815 if (!validateMIMGAtomicDMask(Inst)) {
2816 Error(IDLoc,
2817 "invalid atomic image dmask");
2818 return false;
2819 }
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002820 if (!validateMIMGGatherDMask(Inst)) {
2821 Error(IDLoc,
2822 "invalid image_gather dmask: only one bit must be set");
2823 return false;
2824 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002825
2826 return true;
2827}
2828
// Forward declaration of the mnemonic spell checker used for
// "invalid instruction" diagnostics in MatchAndEmitInstruction.
// NOTE(review): the definition is not in this chunk — presumably
// provided by the tablegen-generated asm matcher; confirm.
static std::string AMDGPUMnemonicSpellCheck(StringRef S,
                                            const FeatureBitset &FBS,
                                            unsigned VariantID = 0);
2832
// Match the parsed operand list against every supported encoding
// variant, validate the winning instruction, and emit it to Out.
// Returns true on error (a diagnostic has been emitted), false on
// successful emission.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  // Try each matcher variant in turn, keeping the most specific failure
  // status seen so far so the final diagnostic is as precise as possible.
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // First successful variant wins; no need to try the rest.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // A matched instruction can still violate target-specific rules
    // (constant bus, literal count, ...); validateInstruction reports them.
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail: {
    // Offer a "did you mean ..." suggestion based on available features.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = AMDGPUMnemonicSpellCheck(
        ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((AMDGPUOperand &)*Operands[0]).getLocRange());
  }

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2902
Artem Tamazov25478d82016-12-29 15:41:52 +00002903bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2904 int64_t Tmp = -1;
2905 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2906 return true;
2907 }
2908 if (getParser().parseAbsoluteExpression(Tmp)) {
2909 return true;
2910 }
2911 Ret = static_cast<uint32_t>(Tmp);
2912 return false;
2913}
2914
Tom Stellard347ac792015-06-26 21:15:07 +00002915bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2916 uint32_t &Minor) {
Artem Tamazov25478d82016-12-29 15:41:52 +00002917 if (ParseAsAbsoluteExpression(Major))
Tom Stellard347ac792015-06-26 21:15:07 +00002918 return TokError("invalid major version");
2919
Tom Stellard347ac792015-06-26 21:15:07 +00002920 if (getLexer().isNot(AsmToken::Comma))
2921 return TokError("minor version number required, comma expected");
2922 Lex();
2923
Artem Tamazov25478d82016-12-29 15:41:52 +00002924 if (ParseAsAbsoluteExpression(Minor))
Tom Stellard347ac792015-06-26 21:15:07 +00002925 return TokError("invalid minor version");
2926
Tom Stellard347ac792015-06-26 21:15:07 +00002927 return false;
2928}
2929
Scott Linder1e8c2c72018-06-21 19:38:56 +00002930bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
2931 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2932 return TokError("directive only supported for amdgcn architecture");
2933
2934 std::string Target;
2935
2936 SMLoc TargetStart = getTok().getLoc();
2937 if (getParser().parseEscapedString(Target))
2938 return true;
2939 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
2940
2941 std::string ExpectedTarget;
2942 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
2943 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
2944
2945 if (Target != ExpectedTargetOS.str())
2946 return getParser().Error(TargetRange.Start, "target must match options",
2947 TargetRange);
2948
2949 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
2950 return false;
2951}
2952
// Emit a generic "value out of range" diagnostic covering Range and
// propagate Error()'s result so callers can `return OutOfRangeError(...)`.
bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return getParser().Error(Range.Start, "value out of range", Range);
}
2956
// Compute the granulated VGPR/SGPR "block" counts that are encoded into
// COMPUTE_PGM_RSRC1 of an amdhsa kernel descriptor.
// Returns true (after emitting a ranged diagnostic) when the SGPR count
// exceeds what the target can address; false on success, with the
// results in VGPRBlocks/SGPRBlocks.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
    unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
    unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;
  unsigned MaxAddressableNumSGPRs = IsaInfo::getAddressableNumSGPRs(&getSTI());

  // gfx8+ without the SGPR-init bug: the limit is checked before the
  // extra (VCC/flat_scratch/XNACK) SGPRs are added.
  if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  NumSGPRs +=
      IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

  // Older targets (and targets with the init bug): the limit is checked
  // after the extra SGPRs are included.
  if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
      NumSGPRs > MaxAddressableNumSGPRs)
    return OutOfRangeError(SGPRRange);

  // Targets with the init bug always report a fixed SGPR count.
  if (Features.test(FeatureSGPRInitBug))
    NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  VGPRBlocks = IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
2989
// Handle .amdhsa_kernel <name>: parse the ".amdhsa_*" sub-directives up
// to .end_amdhsa_kernel, fill in a kernel_descriptor_t, derive the
// granulated VGPR/SGPR counts, and emit the descriptor via the target
// streamer. Returns true on error (a diagnostic has been emitted).
bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
    return TokError("directive only supported for amdhsa OS");

  StringRef KernelName;
  if (getParser().parseIdentifier(KernelName))
    return true;

  kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor();

  // Tracks which sub-directives have appeared; each may occur only once.
  StringSet<> Seen;

  IsaVersion IVersion = getIsaVersion(getSTI().getCPU());

  SMRange VGPRRange;
  uint64_t NextFreeVGPR = 0;
  SMRange SGPRRange;
  uint64_t NextFreeSGPR = 0;
  unsigned UserSGPRCount = 0;
  bool ReserveVCC = true;
  bool ReserveFlatScr = true;
  bool ReserveXNACK = hasXNACK();

  // Each iteration consumes one ".amdhsa_* <value>" sub-directive.
  while (true) {
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");

    StringRef ID = getTok().getIdentifier();
    SMRange IDRange = getTok().getLocRange();
    Lex();

    if (ID == ".end_amdhsa_kernel")
      break;

    if (Seen.find(ID) != Seen.end())
      return TokError(".amdhsa_ directives cannot be repeated");
    Seen.insert(ID);

    SMLoc ValStart = getTok().getLoc();
    int64_t IVal;
    if (getParser().parseAbsoluteExpression(IVal))
      return true;
    SMLoc ValEnd = getTok().getLoc();
    SMRange ValRange = SMRange(ValStart, ValEnd);

    // All sub-directive values are non-negative.
    if (IVal < 0)
      return OutOfRangeError(ValRange);

    uint64_t Val = IVal;

// Range-check VALUE against the width of ENTRY, then set the ENTRY bit
// field of FIELD. Returns from the enclosing function on failure.
#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE)                           \
  if (!isUInt<ENTRY##_WIDTH>(VALUE))                                           \
    return OutOfRangeError(RANGE);                                             \
  AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);

    if (ID == ".amdhsa_group_segment_fixed_size") {
      if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.group_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_private_segment_fixed_size") {
      if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
        return OutOfRangeError(ValRange);
      KD.private_segment_fixed_size = Val;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
                       Val, ValRange);
      UserSGPRCount += 4;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
                       Val, ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
                       ValRange);
      UserSGPRCount += 2;
    } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
      PARSE_BITS_ENTRY(KD.kernel_code_properties,
                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
                       Val, ValRange);
      UserSGPRCount += 1;
    } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
          ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
                       ValRange);
    } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
                       ValRange);
    } else if (ID == ".amdhsa_next_free_vgpr") {
      // Remembered (with its source range) for calculateGPRBlocks below.
      VGPRRange = ValRange;
      NextFreeVGPR = Val;
    } else if (ID == ".amdhsa_next_free_sgpr") {
      SGPRRange = ValRange;
      NextFreeSGPR = Val;
    } else if (ID == ".amdhsa_reserve_vcc") {
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveVCC = Val;
    } else if (ID == ".amdhsa_reserve_flat_scratch") {
      if (IVersion.Major < 7)
        return getParser().Error(IDRange.Start, "directive requires gfx7+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveFlatScr = Val;
    } else if (ID == ".amdhsa_reserve_xnack_mask") {
      if (IVersion.Major < 8)
        return getParser().Error(IDRange.Start, "directive requires gfx8+",
                                 IDRange);
      if (!isUInt<1>(Val))
        return OutOfRangeError(ValRange);
      ReserveXNACK = Val;
    } else if (ID == ".amdhsa_float_round_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_round_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_32") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
    } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
                       ValRange);
    } else if (ID == ".amdhsa_dx10_clamp") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
                       COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
    } else if (ID == ".amdhsa_ieee_mode") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_fp16_overflow") {
      if (IVersion.Major < 9)
        return getParser().Error(IDRange.Start, "directive requires gfx9+",
                                 IDRange);
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
                       ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_denorm_src") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
      PARSE_BITS_ENTRY(
          KD.compute_pgm_rsrc2,
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
          ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
                       Val, ValRange);
    } else if (ID == ".amdhsa_exception_int_div_zero") {
      PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
                       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
                       Val, ValRange);
    } else {
      return getParser().Error(IDRange.Start,
                               "unknown .amdhsa_kernel directive", IDRange);
    }

#undef PARSE_BITS_ENTRY
  }

  // The two register-count directives are mandatory.
  if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
    return TokError(".amdhsa_next_free_vgpr directive is required");

  if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
    return TokError(".amdhsa_next_free_sgpr directive is required");

  unsigned VGPRBlocks;
  unsigned SGPRBlocks;
  if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
                         ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
                         SGPRRange, VGPRBlocks, SGPRBlocks))
    return true;

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
          VGPRBlocks))
    return OutOfRangeError(VGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
          SGPRBlocks))
    return OutOfRangeError(SGPRRange);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
                  SGPRBlocks);

  if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
    return TokError("too many user SGPRs enabled");
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
                  UserSGPRCount);

  getTargetStreamer().EmitAmdhsaKernelDescriptor(
      getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
      ReserveFlatScr, ReserveXNACK);
  return false;
}
3242
Tom Stellard347ac792015-06-26 21:15:07 +00003243bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00003244 uint32_t Major;
3245 uint32_t Minor;
3246
3247 if (ParseDirectiveMajorMinor(Major, Minor))
3248 return true;
3249
3250 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
3251 return false;
3252}
3253
3254bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
Tom Stellard347ac792015-06-26 21:15:07 +00003255 uint32_t Major;
3256 uint32_t Minor;
3257 uint32_t Stepping;
3258 StringRef VendorName;
3259 StringRef ArchName;
3260
3261 // If this directive has no arguments, then use the ISA version for the
3262 // targeted GPU.
3263 if (getLexer().is(AsmToken::EndOfStatement)) {
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003264 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00003265 getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
3266 ISA.Stepping,
Tom Stellard347ac792015-06-26 21:15:07 +00003267 "AMD", "AMDGPU");
3268 return false;
3269 }
3270
Tom Stellard347ac792015-06-26 21:15:07 +00003271 if (ParseDirectiveMajorMinor(Major, Minor))
3272 return true;
3273
3274 if (getLexer().isNot(AsmToken::Comma))
3275 return TokError("stepping version number required, comma expected");
3276 Lex();
3277
Artem Tamazov25478d82016-12-29 15:41:52 +00003278 if (ParseAsAbsoluteExpression(Stepping))
Tom Stellard347ac792015-06-26 21:15:07 +00003279 return TokError("invalid stepping version");
3280
Tom Stellard347ac792015-06-26 21:15:07 +00003281 if (getLexer().isNot(AsmToken::Comma))
3282 return TokError("vendor name required, comma expected");
3283 Lex();
3284
3285 if (getLexer().isNot(AsmToken::String))
3286 return TokError("invalid vendor name");
3287
3288 VendorName = getLexer().getTok().getStringContents();
3289 Lex();
3290
3291 if (getLexer().isNot(AsmToken::Comma))
3292 return TokError("arch name required, comma expected");
3293 Lex();
3294
3295 if (getLexer().isNot(AsmToken::String))
3296 return TokError("invalid arch name");
3297
3298 ArchName = getLexer().getTok().getStringContents();
3299 Lex();
3300
3301 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
3302 VendorName, ArchName);
3303 return false;
3304}
3305
Tom Stellardff7416b2015-06-26 21:58:31 +00003306bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
3307 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00003308 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
3309 // assembly for backwards compatibility.
3310 if (ID == "max_scratch_backing_memory_byte_size") {
3311 Parser.eatToEndOfStatement();
3312 return false;
3313 }
3314
Valery Pykhtindc110542016-03-06 20:25:36 +00003315 SmallString<40> ErrStr;
3316 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00003317 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00003318 return TokError(Err.str());
3319 }
Tom Stellardff7416b2015-06-26 21:58:31 +00003320 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00003321 return false;
3322}
3323
3324bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00003325 amd_kernel_code_t Header;
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00003326 AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
Tom Stellardff7416b2015-06-26 21:58:31 +00003327
3328 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00003329 // Lex EndOfStatement. This is in a while loop, because lexing a comment
3330 // will set the current token to EndOfStatement.
3331 while(getLexer().is(AsmToken::EndOfStatement))
3332 Lex();
3333
3334 if (getLexer().isNot(AsmToken::Identifier))
3335 return TokError("expected value identifier or .end_amd_kernel_code_t");
3336
3337 StringRef ID = getLexer().getTok().getIdentifier();
3338 Lex();
3339
3340 if (ID == ".end_amd_kernel_code_t")
3341 break;
3342
3343 if (ParseAMDKernelCodeTValue(ID, Header))
3344 return true;
3345 }
3346
3347 getTargetStreamer().EmitAMDKernelCodeT(Header);
3348
3349 return false;
3350}
3351
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003352bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
3353 if (getLexer().isNot(AsmToken::Identifier))
3354 return TokError("expected symbol name");
3355
3356 StringRef KernelName = Parser.getTok().getString();
3357
3358 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
3359 ELF::STT_AMDGPU_HSA_KERNEL);
3360 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003361 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
3362 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003363 return false;
3364}
3365
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003366bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003367 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
3368 return Error(getParser().getTok().getLoc(),
3369 ".amd_amdgpu_isa directive is not available on non-amdgcn "
3370 "architectures");
3371 }
3372
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003373 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
3374
3375 std::string ISAVersionStringFromSTI;
3376 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
3377 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
3378
3379 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
3380 return Error(getParser().getTok().getLoc(),
3381 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
3382 "arguments specified through the command line");
3383 }
3384
3385 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
3386 Lex();
3387
3388 return false;
3389}
3390
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003391bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
Scott Linderf5b36e52018-12-12 19:39:27 +00003392 const char *AssemblerDirectiveBegin;
3393 const char *AssemblerDirectiveEnd;
3394 std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
3395 AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())
3396 ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
3397 HSAMD::V3::AssemblerDirectiveEnd)
3398 : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
3399 HSAMD::AssemblerDirectiveEnd);
3400
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003401 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
3402 return Error(getParser().getTok().getLoc(),
Scott Linderf5b36e52018-12-12 19:39:27 +00003403 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003404 "not available on non-amdhsa OSes")).str());
3405 }
3406
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003407 std::string HSAMetadataString;
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003408 if (ParseToEndDirective(AssemblerDirectiveBegin, AssemblerDirectiveEnd,
3409 HSAMetadataString))
3410 return true;
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003411
Scott Linderf5b36e52018-12-12 19:39:27 +00003412 if (IsaInfo::hasCodeObjectV3(&getSTI())) {
3413 if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
3414 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3415 } else {
3416 if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
3417 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3418 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003419
3420 return false;
3421}
3422
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003423/// Common code to parse out a block of text (typically YAML) between start and
3424/// end directives.
3425bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin,
3426 const char *AssemblerDirectiveEnd,
3427 std::string &CollectString) {
3428
3429 raw_string_ostream CollectStream(CollectString);
3430
3431 getLexer().setSkipSpace(false);
3432
3433 bool FoundEnd = false;
3434 while (!getLexer().is(AsmToken::Eof)) {
3435 while (getLexer().is(AsmToken::Space)) {
3436 CollectStream << getLexer().getTok().getString();
3437 Lex();
3438 }
3439
3440 if (getLexer().is(AsmToken::Identifier)) {
3441 StringRef ID = getLexer().getTok().getIdentifier();
3442 if (ID == AssemblerDirectiveEnd) {
3443 Lex();
3444 FoundEnd = true;
3445 break;
3446 }
3447 }
3448
3449 CollectStream << Parser.parseStringToEndOfStatement()
3450 << getContext().getAsmInfo()->getSeparatorString();
3451
3452 Parser.eatToEndOfStatement();
3453 }
3454
3455 getLexer().setSkipSpace(true);
3456
3457 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
3458 return TokError(Twine("expected directive ") +
3459 Twine(AssemblerDirectiveEnd) + Twine(" not found"));
3460 }
3461
3462 CollectStream.flush();
3463 return false;
3464}
3465
3466/// Parse the assembler directive for new MsgPack-format PAL metadata.
3467bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() {
3468 std::string String;
3469 if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin,
3470 AMDGPU::PALMD::AssemblerDirectiveEnd, String))
3471 return true;
3472
3473 auto PALMetadata = getTargetStreamer().getPALMetadata();
3474 if (!PALMetadata->setFromString(String))
3475 return Error(getParser().getTok().getLoc(), "invalid PAL metadata");
3476 return false;
3477}
3478
/// Parse the assembler directive for old linear-format PAL metadata.
/// The directive takes a comma-separated list of key/value register pairs;
/// an odd number of values is an error.  Only valid for the AMDPAL OS.
bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
  if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
    return Error(getParser().getTok().getLoc(),
                 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
                 "not available on non-amdpal OSes")).str());
  }

  auto PALMetadata = getTargetStreamer().getPALMetadata();
  // Mark the metadata object as using the legacy (register-pair) format.
  PALMetadata->setLegacy();
  for (;;) {
    uint32_t Key, Value;
    if (ParseAsAbsoluteExpression(Key)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    // Each key must be followed by a comma and its value.
    if (getLexer().isNot(AsmToken::Comma)) {
      return TokError(Twine("expected an even number of values in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    Lex();
    if (ParseAsAbsoluteExpression(Value)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    PALMetadata->setRegister(Key, Value);
    // A trailing comma means another key/value pair follows.
    if (getLexer().isNot(AsmToken::Comma))
      break;
    Lex();
  }
  return false;
}
3511
Tom Stellard45bb48e2015-06-13 03:28:10 +00003512bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00003513 StringRef IDVal = DirectiveID.getString();
3514
Scott Linder1e8c2c72018-06-21 19:38:56 +00003515 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
3516 if (IDVal == ".amdgcn_target")
3517 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00003518
Scott Linder1e8c2c72018-06-21 19:38:56 +00003519 if (IDVal == ".amdhsa_kernel")
3520 return ParseDirectiveAMDHSAKernel();
Scott Linderf5b36e52018-12-12 19:39:27 +00003521
3522 // TODO: Restructure/combine with PAL metadata directive.
3523 if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
3524 return ParseDirectiveHSAMetadata();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003525 } else {
3526 if (IDVal == ".hsa_code_object_version")
3527 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00003528
Scott Linder1e8c2c72018-06-21 19:38:56 +00003529 if (IDVal == ".hsa_code_object_isa")
3530 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00003531
Scott Linder1e8c2c72018-06-21 19:38:56 +00003532 if (IDVal == ".amd_kernel_code_t")
3533 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003534
Scott Linder1e8c2c72018-06-21 19:38:56 +00003535 if (IDVal == ".amdgpu_hsa_kernel")
3536 return ParseDirectiveAMDGPUHsaKernel();
3537
3538 if (IDVal == ".amd_amdgpu_isa")
3539 return ParseDirectiveISAVersion();
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003540
Scott Linderf5b36e52018-12-12 19:39:27 +00003541 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
3542 return ParseDirectiveHSAMetadata();
3543 }
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003544
Tim Renoufe7bd52f2019-03-20 18:47:21 +00003545 if (IDVal == PALMD::AssemblerDirectiveBegin)
3546 return ParseDirectivePALMetadataBegin();
3547
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003548 if (IDVal == PALMD::AssemblerDirective)
3549 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00003550
Tom Stellard45bb48e2015-06-13 03:28:10 +00003551 return true;
3552}
3553
/// Return true if register \p RegNo exists on the current subtarget.
/// Used to reject registers (TBA/TMA, XNACK_MASK, flat_scratch, extra TTMPs,
/// SGPR102/103, inline values) that are only present on some generations.
bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {

  // TTMP12..TTMP15 (and any register aliasing them) only exist on GFX9.
  for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return isGFX9();
  }

  switch (RegNo) {
  // TBA/TMA were removed on GFX9.
  case AMDGPU::TBA:
  case AMDGPU::TBA_LO:
  case AMDGPU::TBA_HI:
  case AMDGPU::TMA:
  case AMDGPU::TMA_LO:
  case AMDGPU::TMA_HI:
    return !isGFX9();
  // XNACK_MASK requires the XNACK feature and is not on SI/CI.
  case AMDGPU::XNACK_MASK:
  case AMDGPU::XNACK_MASK_LO:
  case AMDGPU::XNACK_MASK_HI:
    return !isCI() && !isSI() && hasXNACK();
  default:
    break;
  }

  // Inline-value registers appeared after VI.
  if (isInlineValue(RegNo))
    return !isCI() && !isSI() && !isVI();

  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}
3607
/// Parse a single instruction operand, trying (in order): the tablegen'd
/// custom operand parsers, register/immediate, a general expression, and
/// finally a bare identifier token (e.g. an instruction flag like 'gds').
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Next, try a register or immediate.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Fall back to a general expression operand.
  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3646
Sam Kolton05ef1c92016-06-03 10:27:37 +00003647StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
3648 // Clear any forced encodings from the previous instruction.
3649 setForcedEncodingSize(0);
3650 setForcedDPP(false);
3651 setForcedSDWA(false);
3652
3653 if (Name.endswith("_e64")) {
3654 setForcedEncodingSize(64);
3655 return Name.substr(0, Name.size() - 4);
3656 } else if (Name.endswith("_e32")) {
3657 setForcedEncodingSize(32);
3658 return Name.substr(0, Name.size() - 4);
3659 } else if (Name.endswith("_dpp")) {
3660 setForcedDPP(true);
3661 return Name.substr(0, Name.size() - 4);
3662 } else if (Name.endswith("_sdwa")) {
3663 setForcedSDWA(true);
3664 return Name.substr(0, Name.size() - 5);
3665 }
3666 return Name;
3667}
3668
Tom Stellard45bb48e2015-06-13 03:28:10 +00003669bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
3670 StringRef Name,
3671 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003672 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00003673 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003674 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00003675
Tom Stellard45bb48e2015-06-13 03:28:10 +00003676 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00003677 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003678
3679 // Eat the comma or space if there is one.
3680 if (getLexer().is(AsmToken::Comma))
3681 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00003682
Tom Stellard45bb48e2015-06-13 03:28:10 +00003683 switch (Res) {
3684 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003685 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003686 Error(getLexer().getLoc(), "failed parsing operand.");
3687 while (!getLexer().is(AsmToken::EndOfStatement)) {
3688 Parser.Lex();
3689 }
3690 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003691 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003692 Error(getLexer().getLoc(), "not a valid operand.");
3693 while (!getLexer().is(AsmToken::EndOfStatement)) {
3694 Parser.Lex();
3695 }
3696 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003697 }
3698 }
3699
Tom Stellard45bb48e2015-06-13 03:28:10 +00003700 return false;
3701}
3702
3703//===----------------------------------------------------------------------===//
3704// Utility functions
3705//===----------------------------------------------------------------------===//
3706
Alex Bradbury58eba092016-11-01 16:32:05 +00003707OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00003708AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003709 switch(getLexer().getKind()) {
3710 default: return MatchOperand_NoMatch;
3711 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003712 StringRef Name = Parser.getTok().getString();
3713 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003714 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003715 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003716
3717 Parser.Lex();
3718 if (getLexer().isNot(AsmToken::Colon))
3719 return MatchOperand_ParseFail;
3720
3721 Parser.Lex();
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003722
3723 bool IsMinus = false;
3724 if (getLexer().getKind() == AsmToken::Minus) {
3725 Parser.Lex();
3726 IsMinus = true;
3727 }
3728
Tom Stellard45bb48e2015-06-13 03:28:10 +00003729 if (getLexer().isNot(AsmToken::Integer))
3730 return MatchOperand_ParseFail;
3731
3732 if (getParser().parseAbsoluteExpression(Int))
3733 return MatchOperand_ParseFail;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003734
3735 if (IsMinus)
3736 Int = -Int;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003737 break;
3738 }
3739 }
3740 return MatchOperand_Success;
3741}
3742
Alex Bradbury58eba092016-11-01 16:32:05 +00003743OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003744AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003745 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003746 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003747 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003748 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003749
Alex Bradbury58eba092016-11-01 16:32:05 +00003750 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003751 if (Res != MatchOperand_Success)
3752 return Res;
3753
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003754 if (ConvertResult && !ConvertResult(Value)) {
3755 return MatchOperand_ParseFail;
3756 }
3757
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003758 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003759 return MatchOperand_Success;
3760}
3761
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003762OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
3763 const char *Prefix,
3764 OperandVector &Operands,
3765 AMDGPUOperand::ImmTy ImmTy,
3766 bool (*ConvertResult)(int64_t&)) {
3767 StringRef Name = Parser.getTok().getString();
3768 if (!Name.equals(Prefix))
3769 return MatchOperand_NoMatch;
3770
3771 Parser.Lex();
3772 if (getLexer().isNot(AsmToken::Colon))
3773 return MatchOperand_ParseFail;
3774
3775 Parser.Lex();
3776 if (getLexer().isNot(AsmToken::LBrac))
3777 return MatchOperand_ParseFail;
3778 Parser.Lex();
3779
3780 unsigned Val = 0;
3781 SMLoc S = Parser.getTok().getLoc();
3782
3783 // FIXME: How to verify the number of elements matches the number of src
3784 // operands?
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00003785 for (int I = 0; I < 4; ++I) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003786 if (I != 0) {
3787 if (getLexer().is(AsmToken::RBrac))
3788 break;
3789
3790 if (getLexer().isNot(AsmToken::Comma))
3791 return MatchOperand_ParseFail;
3792 Parser.Lex();
3793 }
3794
3795 if (getLexer().isNot(AsmToken::Integer))
3796 return MatchOperand_ParseFail;
3797
3798 int64_t Op;
3799 if (getParser().parseAbsoluteExpression(Op))
3800 return MatchOperand_ParseFail;
3801
3802 if (Op != 0 && Op != 1)
3803 return MatchOperand_ParseFail;
3804 Val |= (Op << I);
3805 }
3806
3807 Parser.Lex();
3808 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
3809 return MatchOperand_Success;
3810}
3811
/// Parse a named single-bit flag operand: the bare name (bit = 1) or
/// "no<name>" (bit = 0).  At end of statement, a default of 0 is appended so
/// the operand list stays complete.  Emits (but does not fail on) diagnostics
/// for r128/a16, which are mutually exclusive per GPU generation.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        // GFX9 replaced the r128 modifier with a16; reject the wrong one.
        if (Tok == "r128" && isGFX9())
          Error(S, "r128 modifier is not supported on this GPU");
        if (Tok == "a16" && !isGFX9())
          Error(S, "a16 modifier is not supported on this GPU");
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
3847
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003848static void addOptionalImmOperand(
3849 MCInst& Inst, const OperandVector& Operands,
3850 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3851 AMDGPUOperand::ImmTy ImmT,
3852 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003853 auto i = OptionalIdx.find(ImmT);
3854 if (i != OptionalIdx.end()) {
3855 unsigned Idx = i->second;
3856 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3857 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00003858 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003859 }
3860}
3861
Alex Bradbury58eba092016-11-01 16:32:05 +00003862OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003863AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00003864 if (getLexer().isNot(AsmToken::Identifier)) {
3865 return MatchOperand_NoMatch;
3866 }
3867 StringRef Tok = Parser.getTok().getString();
3868 if (Tok != Prefix) {
3869 return MatchOperand_NoMatch;
3870 }
3871
3872 Parser.Lex();
3873 if (getLexer().isNot(AsmToken::Colon)) {
3874 return MatchOperand_ParseFail;
3875 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003876
Sam Kolton3025e7f2016-04-26 13:33:56 +00003877 Parser.Lex();
3878 if (getLexer().isNot(AsmToken::Identifier)) {
3879 return MatchOperand_ParseFail;
3880 }
3881
3882 Value = Parser.getTok().getString();
3883 return MatchOperand_Success;
3884}
3885
// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
// Either field may appear, in either order; dfmt must fit in 4 bits and
// nfmt in 3 bits.
OperandMatchResultTy
AMDGPUAsmParser::parseDfmtNfmt(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Dfmt = 0, Nfmt = 0;
  // dfmt and nfmt can appear in either order, and each is optional.
  bool GotDfmt = false, GotNfmt = false;
  while (!GotDfmt || !GotNfmt) {
    if (!GotDfmt) {
      auto Res = parseIntWithPrefix("dfmt", Dfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        if (Dfmt >= 16) {
          Error(Parser.getTok().getLoc(), "out of range dfmt");
          return MatchOperand_ParseFail;
        }
        GotDfmt = true;
        // NOTE(review): this Lex() consumes the token after the parsed
        // integer (presumably the ',' separating dfmt/nfmt) — confirm.
        Parser.Lex();
        continue;
      }
    }
    if (!GotNfmt) {
      auto Res = parseIntWithPrefix("nfmt", Nfmt);
      if (Res != MatchOperand_NoMatch) {
        if (Res != MatchOperand_Success)
          return Res;
        if (Nfmt >= 8) {
          Error(Parser.getTok().getLoc(), "out of range nfmt");
          return MatchOperand_ParseFail;
        }
        GotNfmt = true;
        Parser.Lex();
        continue;
      }
    }
    break;
  }
  if (!GotDfmt && !GotNfmt)
    return MatchOperand_NoMatch;
  // Pack both fields into the joint format value: dfmt in bits [3:0],
  // nfmt starting at bit 4.
  auto Format = Dfmt | Nfmt << 4;
  Operands.push_back(
    AMDGPUOperand::CreateImm(this, Format, S, AMDGPUOperand::ImmTyFORMAT));
  return MatchOperand_Success;
}
3932
Tom Stellard45bb48e2015-06-13 03:28:10 +00003933//===----------------------------------------------------------------------===//
3934// ds
3935//===----------------------------------------------------------------------===//
3936
Tom Stellard45bb48e2015-06-13 03:28:10 +00003937void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
3938 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003939 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003940
3941 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3942 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3943
3944 // Add the register arguments
3945 if (Op.isReg()) {
3946 Op.addRegOperands(Inst, 1);
3947 continue;
3948 }
3949
3950 // Handle optional arguments
3951 OptionalIdx[Op.getImmTy()] = i;
3952 }
3953
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003954 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
3955 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003956 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003957
Tom Stellard45bb48e2015-06-13 03:28:10 +00003958 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
3959}
3960
/// Build an MCInst for a DS instruction with a single offset operand.
/// \p IsGdsHardcoded is true when the opcode implies GDS (so no explicit gds
/// operand slot exists); it is also forced if a literal "gds" token was
/// parsed.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // A literal "gds" token means the gds slot is already implied.
    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle's offset carries a swizzle pattern, so it uses a distinct
  // immediate type for printing/validation.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
3995
/// Build an MCInst for an EXP (export) instruction.  Records the MCInst
/// position of each of the four source slots so that, for compressed exports,
/// the operands can be rearranged afterwards, and computes the 'en' mask from
/// which sources are real registers (vs 'off').
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // MCInst operand index of each of the 4 source slots.
  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // 'off' placeholders become NoRegister in the corresponding slot.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // 'done' is encoded in the opcode, not as an operand.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // Compressed export: the second register pair moves into slot 1 and the
    // upper slots are cleared.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Each live source enables one en bit (two adjacent bits when compressed).
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00004056
4057//===----------------------------------------------------------------------===//
4058// s_waitcnt
4059//===----------------------------------------------------------------------===//
4060
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004061static bool
4062encodeCnt(
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004063 const AMDGPU::IsaVersion ISA,
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004064 int64_t &IntVal,
4065 int64_t CntVal,
4066 bool Saturate,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +00004067 unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
4068 unsigned (*decode)(const IsaVersion &Version, unsigned))
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00004069{
4070 bool Failed = false;
4071
4072 IntVal = encode(ISA, IntVal, CntVal);
4073 if (CntVal != decode(ISA, IntVal)) {
4074 if (Saturate) {
4075 IntVal = encode(ISA, IntVal, -1);
4076 } else {
4077 Failed = true;
4078 }
4079 }
4080 return Failed;
4081}
4082
/// Parse one "<name>(<value>)" counter term of an s_waitcnt expression, where
/// name is vmcnt/expcnt/lgkmcnt with an optional "_sat" suffix (saturate
/// instead of erroring on overflow).  The encoded field is merged into
/// \p IntVal.  Returns true on error.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  // Remember the value's location for the overflow diagnostic below.
  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  // An unrecognized counter name leaves Failed set and is reported below.
  bool Failed = true;
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  // Consume an '&' or ',' separator, but only when another counter term
  // (an identifier) actually follows it.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
4131
/// Parse the operand of s_waitcnt: either a raw integer, or one or more
/// named counter terms (see parseCnt).  Unspecified counters default to
/// their all-ones (no-wait) bit mask.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
  // Start from the full mask so counters not mentioned wait for nothing.
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // One or more named counter terms, each merged into Waitcnt.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
4156
/// Parse a "hwreg(<id>[, <offset>, <width>])" construct.  The register may be
/// given as a symbolic name or a numeric code; offset and width are optional
/// but must appear together.  Returns true on parse error.  Range checking of
/// the parsed values is done by the caller (parseHwreg).
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name; an unknown name leaves Id as ID_UNKNOWN_ for
    // the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    // Pre-GFX9 targets only accept the older subset of symbolic names.
    int Last = ID_SYMBOLIC_LAST_;
    if (isSI() || isCI() || isVI())
      Last = ID_SYMBOLIC_FIRST_GFX9_;
    for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register code.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: hwreg(<id>) with default offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
4221
/// Parse the operand of s_getreg/s_setreg: either a raw 16-bit immediate or a
/// hwreg(...) construct, packed as id | offset << OFFSET_SHIFT_ |
/// (width-1) << WIDTH_M1_SHIFT_.  Out-of-range fields emit diagnostics but an
/// immediate operand is still created so parsing can continue.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        // Validate each field; diagnostics do not abort operand creation.
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Pack the fields into the 16-bit SIMM16 encoding.
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
4264
// An s_waitcnt operand is any plain immediate at this point; detailed
// validation/encoding of the waitcnt fields happens elsewhere.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
4268
// True iff this operand is an immediate tagged as a hwreg encoding.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
4272
Artem Tamazov6edc1352016-05-26 17:00:33 +00004273bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004274 using namespace llvm::AMDGPU::SendMsg;
4275
4276 if (Parser.getTok().getString() != "sendmsg")
4277 return true;
4278 Parser.Lex();
4279
4280 if (getLexer().isNot(AsmToken::LParen))
4281 return true;
4282 Parser.Lex();
4283
4284 if (getLexer().is(AsmToken::Identifier)) {
4285 Msg.IsSymbolic = true;
4286 Msg.Id = ID_UNKNOWN_;
4287 const std::string tok = Parser.getTok().getString();
4288 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
4289 switch(i) {
4290 default: continue; // Omit gaps.
4291 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
4292 }
4293 if (tok == IdSymbolic[i]) {
4294 Msg.Id = i;
4295 break;
4296 }
4297 }
4298 Parser.Lex();
4299 } else {
4300 Msg.IsSymbolic = false;
4301 if (getLexer().isNot(AsmToken::Integer))
4302 return true;
4303 if (getParser().parseAbsoluteExpression(Msg.Id))
4304 return true;
4305 if (getLexer().is(AsmToken::Integer))
4306 if (getParser().parseAbsoluteExpression(Msg.Id))
4307 Msg.Id = ID_UNKNOWN_;
4308 }
4309 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
4310 return false;
4311
4312 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
4313 if (getLexer().isNot(AsmToken::RParen))
4314 return true;
4315 Parser.Lex();
4316 return false;
4317 }
4318
4319 if (getLexer().isNot(AsmToken::Comma))
4320 return true;
4321 Parser.Lex();
4322
4323 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
4324 Operation.Id = ID_UNKNOWN_;
4325 if (getLexer().is(AsmToken::Identifier)) {
4326 Operation.IsSymbolic = true;
4327 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
4328 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
4329 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00004330 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00004331 for (int i = F; i < L; ++i) {
4332 if (Tok == S[i]) {
4333 Operation.Id = i;
4334 break;
4335 }
4336 }
4337 Parser.Lex();
4338 } else {
4339 Operation.IsSymbolic = false;
4340 if (getLexer().isNot(AsmToken::Integer))
4341 return true;
4342 if (getParser().parseAbsoluteExpression(Operation.Id))
4343 return true;
4344 }
4345
4346 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
4347 // Stream id is optional.
4348 if (getLexer().is(AsmToken::RParen)) {
4349 Parser.Lex();
4350 return false;
4351 }
4352
4353 if (getLexer().isNot(AsmToken::Comma))
4354 return true;
4355 Parser.Lex();
4356
4357 if (getLexer().isNot(AsmToken::Integer))
4358 return true;
4359 if (getParser().parseAbsoluteExpression(StreamId))
4360 return true;
4361 }
4362
4363 if (getLexer().isNot(AsmToken::RParen))
4364 return true;
4365 Parser.Lex();
4366 return false;
4367}
4368
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004369OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
4370 if (getLexer().getKind() != AsmToken::Identifier)
4371 return MatchOperand_NoMatch;
4372
4373 StringRef Str = Parser.getTok().getString();
4374 int Slot = StringSwitch<int>(Str)
4375 .Case("p10", 0)
4376 .Case("p20", 1)
4377 .Case("p0", 2)
4378 .Default(-1);
4379
4380 SMLoc S = Parser.getTok().getLoc();
4381 if (Slot == -1)
4382 return MatchOperand_ParseFail;
4383
4384 Parser.Lex();
4385 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
4386 AMDGPUOperand::ImmTyInterpSlot));
4387 return MatchOperand_Success;
4388}
4389
4390OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
4391 if (getLexer().getKind() != AsmToken::Identifier)
4392 return MatchOperand_NoMatch;
4393
4394 StringRef Str = Parser.getTok().getString();
4395 if (!Str.startswith("attr"))
4396 return MatchOperand_NoMatch;
4397
4398 StringRef Chan = Str.take_back(2);
4399 int AttrChan = StringSwitch<int>(Chan)
4400 .Case(".x", 0)
4401 .Case(".y", 1)
4402 .Case(".z", 2)
4403 .Case(".w", 3)
4404 .Default(-1);
4405 if (AttrChan == -1)
4406 return MatchOperand_ParseFail;
4407
4408 Str = Str.drop_back(2).drop_front(4);
4409
4410 uint8_t Attr;
4411 if (Str.getAsInteger(10, Attr))
4412 return MatchOperand_ParseFail;
4413
4414 SMLoc S = Parser.getTok().getLoc();
4415 Parser.Lex();
4416 if (Attr > 63) {
4417 Error(S, "out of bounds attr");
4418 return MatchOperand_Success;
4419 }
4420
4421 SMLoc SChan = SMLoc::getFromPointer(Chan.data());
4422
4423 Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
4424 AMDGPUOperand::ImmTyInterpAttr));
4425 Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
4426 AMDGPUOperand::ImmTyAttrChan));
4427 return MatchOperand_Success;
4428}
4429
// Emit the common "invalid exp target" diagnostic at the current token.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
4433
4434OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
4435 uint8_t &Val) {
4436 if (Str == "null") {
4437 Val = 9;
4438 return MatchOperand_Success;
4439 }
4440
4441 if (Str.startswith("mrt")) {
4442 Str = Str.drop_front(3);
4443 if (Str == "z") { // == mrtz
4444 Val = 8;
4445 return MatchOperand_Success;
4446 }
4447
4448 if (Str.getAsInteger(10, Val))
4449 return MatchOperand_ParseFail;
4450
4451 if (Val > 7)
4452 errorExpTgt();
4453
4454 return MatchOperand_Success;
4455 }
4456
4457 if (Str.startswith("pos")) {
4458 Str = Str.drop_front(3);
4459 if (Str.getAsInteger(10, Val))
4460 return MatchOperand_ParseFail;
4461
4462 if (Val > 3)
4463 errorExpTgt();
4464
4465 Val += 12;
4466 return MatchOperand_Success;
4467 }
4468
4469 if (Str.startswith("param")) {
4470 Str = Str.drop_front(5);
4471 if (Str.getAsInteger(10, Val))
4472 return MatchOperand_ParseFail;
4473
4474 if (Val >= 32)
4475 errorExpTgt();
4476
4477 Val += 32;
4478 return MatchOperand_Success;
4479 }
4480
4481 if (Str.startswith("invalid_target_")) {
4482 Str = Str.drop_front(15);
4483 if (Str.getAsInteger(10, Val))
4484 return MatchOperand_ParseFail;
4485
4486 errorExpTgt();
4487 return MatchOperand_Success;
4488 }
4489
4490 return MatchOperand_NoMatch;
4491}
4492
4493OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
4494 uint8_t Val;
4495 StringRef Str = Parser.getTok().getString();
4496
4497 auto Res = parseExpTgtImpl(Str, Val);
4498 if (Res != MatchOperand_Success)
4499 return Res;
4500
4501 SMLoc S = Parser.getTok().getLoc();
4502 Parser.Lex();
4503
4504 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
4505 AMDGPUOperand::ImmTyExpTgt));
4506 return MatchOperand_Success;
4507}
4508
// Parse an s_sendmsg operand: either a raw 16-bit immediate or a symbolic
// sendmsg(...) construct. Field values are validated here; invalid fields
// are diagnosed but an immediate operand is still produced so parsing can
// continue (the do/while(false) below is a structured early-exit).
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          // NOP is only meaningful for GS_DONE messages.
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID (GS messages other than NOP only).
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
4587
// True iff this operand is an immediate tagged as a sendmsg encoding.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
4591
Tom Stellard45bb48e2015-06-13 03:28:10 +00004592//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004593// parser helpers
4594//===----------------------------------------------------------------------===//
4595
// True iff Token is an identifier with exactly the text Id.
bool
AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const {
  return Token.is(AsmToken::Identifier) && Token.getString() == Id;
}
4600
// True iff the current token is the identifier Id.
bool
AMDGPUAsmParser::isId(const StringRef Id) const {
  return isId(getToken(), Id);
}
4605
// True iff the current token has the given kind.
bool
AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const {
  return getTokenKind() == Kind;
}
4610
4611bool
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004612AMDGPUAsmParser::trySkipId(const StringRef Id) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004613 if (isId(Id)) {
4614 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004615 return true;
4616 }
4617 return false;
4618}
4619
4620bool
4621AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004622 if (isToken(Kind)) {
4623 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004624 return true;
4625 }
4626 return false;
4627}
4628
4629bool
4630AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
4631 const StringRef ErrMsg) {
4632 if (!trySkipToken(Kind)) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004633 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004634 return false;
4635 }
4636 return true;
4637}
4638
// Parse an absolute expression into Imm.
// Returns true on success (note: inverts the MCAsmParser convention,
// where a nonzero return means failure).
bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}
4643
4644bool
4645AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004646 if (isToken(AsmToken::String)) {
4647 Val = getToken().getStringContents();
4648 lex();
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004649 return true;
4650 } else {
Dmitry Preobrazhensky20d52e32019-04-17 14:44:01 +00004651 Error(getLoc(), ErrMsg);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004652 return false;
4653 }
4654}
4655
// Returns a copy of the current (not yet consumed) token.
AsmToken
AMDGPUAsmParser::getToken() const {
  return Parser.getTok();
}
4660
// Returns the next token without consuming the current one.
AsmToken
AMDGPUAsmParser::peekToken() {
  return getLexer().peekTok();
}
4665
Dmitry Preobrazhenskye2707f52019-04-22 14:35:47 +00004666void
4667AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) {
4668 auto TokCount = getLexer().peekTokens(Tokens);
4669
4670 for (auto Idx = TokCount; Idx < Tokens.size(); ++Idx)
4671 Tokens[Idx] = AsmToken(AsmToken::Error, "");
4672}
4673
// Kind of the current token.
AsmToken::TokenKind
AMDGPUAsmParser::getTokenKind() const {
  return getLexer().getKind();
}
4678
// Source location of the current token.
SMLoc
AMDGPUAsmParser::getLoc() const {
  return getToken().getLoc();
}
4683
// Text of the current token.
StringRef
AMDGPUAsmParser::getTokenStr() const {
  return getToken().getString();
}
4688
// Consume the current token and advance to the next one.
void
AMDGPUAsmParser::lex() {
  Parser.Lex();
}
4693
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004694//===----------------------------------------------------------------------===//
4695// swizzle
4696//===----------------------------------------------------------------------===//
4697
4698LLVM_READNONE
4699static unsigned
4700encodeBitmaskPerm(const unsigned AndMask,
4701 const unsigned OrMask,
4702 const unsigned XorMask) {
4703 using namespace llvm::AMDGPU::Swizzle;
4704
4705 return BITMASK_PERM_ENC |
4706 (AndMask << BITMASK_AND_SHIFT) |
4707 (OrMask << BITMASK_OR_SHIFT) |
4708 (XorMask << BITMASK_XOR_SHIFT);
4709}
4710
4711bool
4712AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
4713 const unsigned MinVal,
4714 const unsigned MaxVal,
4715 const StringRef ErrMsg) {
4716 for (unsigned i = 0; i < OpNum; ++i) {
4717 if (!skipToken(AsmToken::Comma, "expected a comma")){
4718 return false;
4719 }
4720 SMLoc ExprLoc = Parser.getTok().getLoc();
4721 if (!parseExpr(Op[i])) {
4722 return false;
4723 }
4724 if (Op[i] < MinVal || Op[i] > MaxVal) {
4725 Error(ExprLoc, ErrMsg);
4726 return false;
4727 }
4728 }
4729
4730 return true;
4731}
4732
4733bool
4734AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
4735 using namespace llvm::AMDGPU::Swizzle;
4736
4737 int64_t Lane[LANE_NUM];
4738 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
4739 "expected a 2-bit lane id")) {
4740 Imm = QUAD_PERM_ENC;
Stanislav Mekhanoshin266f1572019-03-11 16:49:32 +00004741 for (unsigned I = 0; I < LANE_NUM; ++I) {
4742 Imm |= Lane[I] << (LANE_SHIFT * I);
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00004743 }
4744 return true;
4745 }
4746 return false;
4747}
4748
4749bool
4750AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
4751 using namespace llvm::AMDGPU::Swizzle;
4752
4753 SMLoc S = Parser.getTok().getLoc();
4754 int64_t GroupSize;
4755 int64_t LaneIdx;
4756
4757 if (!parseSwizzleOperands(1, &GroupSize,
4758 2, 32,
4759 "group size must be in the interval [2,32]")) {
4760 return false;
4761 }
4762 if (!isPowerOf2_64(GroupSize)) {
4763 Error(S, "group size must be a power of two");
4764 return false;
4765 }
4766 if (parseSwizzleOperands(1, &LaneIdx,
4767 0, GroupSize - 1,
4768 "lane id must be in the interval [0,group size - 1]")) {
4769 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
4770 return true;
4771 }
4772 return false;
4773}
4774
4775bool
4776AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
4777 using namespace llvm::AMDGPU::Swizzle;
4778
4779 SMLoc S = Parser.getTok().getLoc();
4780 int64_t GroupSize;
4781
4782 if (!parseSwizzleOperands(1, &GroupSize,
4783 2, 32, "group size must be in the interval [2,32]")) {
4784 return false;
4785 }
4786 if (!isPowerOf2_64(GroupSize)) {
4787 Error(S, "group size must be a power of two");
4788 return false;
4789 }
4790
4791 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
4792 return true;
4793}
4794
4795bool
4796AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
4797 using namespace llvm::AMDGPU::Swizzle;
4798
4799 SMLoc S = Parser.getTok().getLoc();
4800 int64_t GroupSize;
4801
4802 if (!parseSwizzleOperands(1, &GroupSize,
4803 1, 16, "group size must be in the interval [1,16]")) {
4804 return false;
4805 }
4806 if (!isPowerOf2_64(GroupSize)) {
4807 Error(S, "group size must be a power of two");
4808 return false;
4809 }
4810
4811 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
4812 return true;
4813}
4814
// Parse the BITMASK_PERM swizzle mode: a comma followed by a 5-character
// control string, one character per lane-index bit (MSB first):
//   '0' - force the bit to 0
//   '1' - force the bit to 1
//   'p' - preserve the bit
//   'i' - invert the bit
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  // Build the and/or/xor masks; Ctl[0] controls the most significant bit.
  for (size_t i = 0; i < Ctl.size(); ++i) {
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      // Bit forced to zero: excluded from all three masks.
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
4861
4862bool
4863AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
4864
4865 SMLoc OffsetLoc = Parser.getTok().getLoc();
4866
4867 if (!parseExpr(Imm)) {
4868 return false;
4869 }
4870 if (!isUInt<16>(Imm)) {
4871 Error(OffsetLoc, "expected a 16-bit offset");
4872 return false;
4873 }
4874 return true;
4875}
4876
// Parse the parenthesized part of "swizzle(<mode>, ...)": dispatch on the
// mode name (from IdSymbolic) to the mode-specific parser, then require a
// closing parenthesis.
bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parentheses")) {

    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
  }

  return false;
}
4905
// Parse an optional "offset:" operand of ds_swizzle: either the symbolic
// "offset:swizzle(...)" macro or a plain 16-bit offset. When "offset" is
// absent, fall through to generic optional-operand parsing.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {

    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    // Push the operand even on failure so operand counts stay consistent.
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}
4931
// True iff this operand is an immediate tagged as a swizzle encoding.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
4936
4937//===----------------------------------------------------------------------===//
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00004938// VGPR Index Mode
4939//===----------------------------------------------------------------------===//
4940
// Parse the body of a "gpr_idx(...)" macro after the opening parenthesis:
// a comma-separated list of distinct VGPR index mode names, OR-ed together
// into a mode bitmask. An empty list "()" yields OFF. Errors are reported
// but a (possibly partial) bitmask is still returned; the caller decides
// how to proceed.
int64_t AMDGPUAsmParser::parseGPRIdxMacro() {

  using namespace llvm::AMDGPU::VGPRIndexMode;

  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = Parser.getTok().getLoc();

    // Match one of the symbolic mode names; each ModeId maps to one bit.
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      break;
    }

    // Each mode may appear at most once.
    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      break;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      break;
  }

  return Imm;
}
4984
// Parse a VGPR index mode operand: either the symbolic "gpr_idx(...)"
// macro or a raw 4-bit immediate. Invalid values are diagnosed but an
// operand is still produced to limit cascading errors.
OperandMatchResultTy
AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {

  int64_t Imm = 0;
  SMLoc S = Parser.getTok().getLoc();

  // Require the full "gpr_idx(" prefix before committing to macro parsing.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "gpr_idx" &&
      getLexer().peekTok().is(AsmToken::LParen)) {

    // Consume "gpr_idx" and "(".
    Parser.Lex();
    Parser.Lex();

    // If parse failed, trigger an error but do not return error code
    // to avoid excessive error messages.
    Imm = parseGPRIdxMacro();

  } else {
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_NoMatch;
    if (Imm < 0 || !isUInt<4>(Imm)) {
      Error(S, "invalid immediate: only 4-bit values are legal");
    }
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode));
  return MatchOperand_Success;
}
5014
// True iff this operand is an immediate tagged as a VGPR index mode.
bool AMDGPUOperand::isGPRIdxMode() const {
  return isImmTy(ImmTyGprIdxMode);
}
5018
5019//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005020// sopp branch targets
5021//===----------------------------------------------------------------------===//
5022
// Parse a SOPP branch target: either an absolute integer expression or a
// label identifier (emitted as a symbol-reference expression operand).
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      // Create (or reuse) a symbol for the label and reference it.
      Operands.push_back(AMDGPUOperand::CreateExpr(this,
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
              Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
5045
5046//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005047// mubuf
5048//===----------------------------------------------------------------------===//
5049
// Default operand used when the optional glc modifier is absent (glc = 0).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
5053
// Default operand used when the optional slc modifier is absent (slc = 0).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
5057
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005058void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
5059 const OperandVector &Operands,
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005060 bool IsAtomic,
5061 bool IsAtomicReturn,
5062 bool IsLds) {
5063 bool IsLdsOpcode = IsLds;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005064 bool HasLdsModifier = false;
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005065 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005066 assert(IsAtomicReturn ? IsAtomic : true);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005067 unsigned FirstOperandIdx = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00005068
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005069 for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00005070 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5071
5072 // Add the register arguments
5073 if (Op.isReg()) {
5074 Op.addRegOperands(Inst, 1);
Dmitry Preobrazhensky7f335742019-03-29 12:16:04 +00005075 // Insert a tied src for atomic return dst.
5076 // This cannot be postponed as subsequent calls to
5077 // addImmOperands rely on correct number of MC operands.
5078 if (IsAtomicReturn && i == FirstOperandIdx)
5079 Op.addRegOperands(Inst, 1);
Tom Stellard45bb48e2015-06-13 03:28:10 +00005080 continue;
5081 }
5082
5083 // Handle the case where soffset is an immediate
5084 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5085 Op.addImmOperands(Inst, 1);
5086 continue;
5087 }
5088
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005089 HasLdsModifier = Op.isLDS();
5090
Tom Stellard45bb48e2015-06-13 03:28:10 +00005091 // Handle tokens like 'offen' which are sometimes hard-coded into the
5092 // asm string. There are no MCInst operands for these.
5093 if (Op.isToken()) {
5094 continue;
5095 }
5096 assert(Op.isImm());
5097
5098 // Handle optional arguments
5099 OptionalIdx[Op.getImmTy()] = i;
5100 }
5101
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005102 // This is a workaround for an llvm quirk which may result in an
5103 // incorrect instruction selection. Lds and non-lds versions of
5104 // MUBUF instructions are identical except that lds versions
5105 // have mandatory 'lds' modifier. However this modifier follows
5106 // optional modifiers and llvm asm matcher regards this 'lds'
5107 // modifier as an optional one. As a result, an lds version
5108 // of opcode may be selected even if it has no 'lds' modifier.
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005109 if (IsLdsOpcode && !HasLdsModifier) {
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005110 int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
5111 if (NoLdsOpcode != -1) { // Got lds version - correct it.
5112 Inst.setOpcode(NoLdsOpcode);
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005113 IsLdsOpcode = false;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005114 }
5115 }
5116
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005117 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00005118 if (!IsAtomic) { // glc is hard-coded.
5119 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5120 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00005121 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005122
Dmitry Preobrazhenskyd98c97b2018-03-12 17:29:24 +00005123 if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005124 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5125 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00005126}
5127
David Stuttard70e8bc12017-06-22 16:29:22 +00005128void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
5129 OptionalImmIndexMap OptionalIdx;
5130
5131 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
5132 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
5133
5134 // Add the register arguments
5135 if (Op.isReg()) {
5136 Op.addRegOperands(Inst, 1);
5137 continue;
5138 }
5139
5140 // Handle the case where soffset is an immediate
5141 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
5142 Op.addImmOperands(Inst, 1);
5143 continue;
5144 }
5145
5146 // Handle tokens like 'offen' which are sometimes hard-coded into the
5147 // asm string. There are no MCInst operands for these.
5148 if (Op.isToken()) {
5149 continue;
5150 }
5151 assert(Op.isImm());
5152
5153 // Handle optional arguments
5154 OptionalIdx[Op.getImmTy()] = i;
5155 }
5156
5157 addOptionalImmOperand(Inst, Operands, OptionalIdx,
5158 AMDGPUOperand::ImmTyOffset);
Tim Renouf35484c92018-08-21 11:06:05 +00005159 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
David Stuttard70e8bc12017-06-22 16:29:22 +00005160 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
5161 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
5162 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
5163}
5164
Tom Stellard45bb48e2015-06-13 03:28:10 +00005165//===----------------------------------------------------------------------===//
5166// mimg
5167//===----------------------------------------------------------------------===//
5168
// Convert parsed MIMG operands into MCInst operands: def registers first,
// then (for atomics) the tied source copy of the dst, then use registers;
// optional modifiers are recorded and appended in the canonical order.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Append the def registers (Operands[0] is the mnemonic token).
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      // MIMG instructions only take registers and immediate modifiers.
      llvm_unreachable("unexpected operand type");
    }
  }

  // Append optional modifiers in the order the MCInst expects.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
5208
5209void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005210 cvtMIMG(Inst, Operands, true);
Sam Kolton1bdcef72016-05-23 09:59:02 +00005211}
5212
Tom Stellard45bb48e2015-06-13 03:28:10 +00005213//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00005214// smrd
5215//===----------------------------------------------------------------------===//
5216
Artem Tamazov54bfd542016-10-31 16:07:39 +00005217bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00005218 return isImm() && isUInt<8>(getImm());
5219}
5220
Artem Tamazov54bfd542016-10-31 16:07:39 +00005221bool AMDGPUOperand::isSMRDOffset20() const {
5222 return isImm() && isUInt<20>(getImm());
5223}
5224
Tom Stellard217361c2015-08-06 19:28:38 +00005225bool AMDGPUOperand::isSMRDLiteralOffset() const {
5226 // 32-bit literals are only supported on CI and we only want to use them
5227 // when the offset is > 8-bits.
5228 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
5229}
5230
Artem Tamazov54bfd542016-10-31 16:07:39 +00005231AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
5232 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
5233}
5234
5235AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005236 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00005237}
5238
5239AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005240 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00005241}
5242
Matt Arsenaultfd023142017-06-12 15:55:58 +00005243AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
5244 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
5245}
5246
Matt Arsenault9698f1c2017-06-20 19:54:14 +00005247AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
5248 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
5249}
5250
Tom Stellard217361c2015-08-06 19:28:38 +00005251//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00005252// vop3
5253//===----------------------------------------------------------------------===//
5254
// Convert an omod "mul:N" operand value (1, 2 or 4) into its encoded form
// (0, 1 or 2) in place. Returns false, leaving the value untouched, for any
// other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
5262
// Convert an omod "div:N" operand value into its encoded form in place:
// div:1 encodes as 0, div:2 encodes as 3. Any other divisor is rejected
// (returns false, value untouched).
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
5276
// Convert a parsed bound_ctrl value into its encoded form in place:
// bound_ctrl:0 encodes as 1, bound_ctrl:-1 encodes as 0. Anything else is
// rejected (returns false, value untouched).
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
5290
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005291// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00005292static const OptionalOperand AMDGPUOptionalOperandTable[] = {
5293 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
5294 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
5295 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
5296 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
5297 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
5298 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00005299 {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005300 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
Dmitry Preobrazhenskydd2f1c92017-11-24 13:22:38 +00005301 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
Tim Renouf35484c92018-08-21 11:06:05 +00005302 {"dfmt", AMDGPUOperand::ImmTyFORMAT, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005303 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
5304 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
5305 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
Dmitry Preobrazhensky4f321ae2018-01-29 14:20:42 +00005306 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +00005307 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005308 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
5309 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
5310 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
5311 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
Ryan Taylor1f334d02018-08-28 15:07:30 +00005312 {"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
5313 {"a16", AMDGPUOperand::ImmTyR128A16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005314 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
Nicolai Haehnlef2674312018-06-21 13:36:01 +00005315 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005316 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
5317 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
5318 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
5319 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00005320 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
5321 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
5322 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00005323 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00005324 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00005325 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00005326 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
5327 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
5328 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
5329 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005330};
Tom Stellard45bb48e2015-06-13 03:28:10 +00005331
// Top-level entry for parsing one optional operand. May parse more than one
// operand (see the lookahead note below); returns the result of the last
// parseOptionalOpr attempt.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  unsigned size = Operands.size();
  assert(size > 0);

  OperandMatchResultTy res = parseOptionalOpr(Operands);

  // This is a hack to enable hardcoded mandatory operands which follow
  // optional operands.
  //
  // Current design assumes that all operands after the first optional operand
  // are also optional. However implementation of some instructions violates
  // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
  //
  // To alleviate this problem, we have to (implicitly) parse extra operands
  // to make sure autogenerated parser of custom operands never hit hardcoded
  // mandatory operands.

  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {

    // We have parsed the first optional operand.
    // Parse as many operands as necessary to skip all mandatory operands.

    // Bounded lookahead: keep consuming comma-separated operands until a
    // parse fails or the statement ends.
    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
      if (res != MatchOperand_Success ||
          getLexer().is(AsmToken::EndOfStatement)) break;
      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
      res = parseOptionalOpr(Operands);
    }
  }

  return res;
}
5364
// Try each entry of AMDGPUOptionalOperandTable in order, dispatching to the
// specialized parser for the operand's type. Returns the first result that
// is not MatchOperand_NoMatch (success or failure), or NoMatch if no table
// entry matched the current token stream.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      // Bit-flag operands ("glc", "tfe", ...): present or absent, no value.
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      // VOP3P modifiers take a bracketed per-source list, e.g. op_sel:[1,0].
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else if (Op.Type == AMDGPUOperand::ImmTyFORMAT) {
      res = parseDfmtNfmt(Operands);
    } else {
      // Default: "name:value" integer operand, optionally re-encoded by
      // the table's ConvertResult callback.
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
5396
Matt Arsenault12c53892016-11-15 19:58:54 +00005397OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005398 StringRef Name = Parser.getTok().getString();
5399 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00005400 return parseIntWithPrefix("mul", Operands,
5401 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005402 }
Matt Arsenault12c53892016-11-15 19:58:54 +00005403
5404 if (Name == "div") {
5405 return parseIntWithPrefix("div", Operands,
5406 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
5407 }
5408
5409 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00005410}
5411
// Convert a VOP3 instruction that uses op_sel: perform the normal VOP3P
// conversion, then move the destination's op_sel bit (the bit just past the
// last source) into the DST_OP_SEL flag of the src0_modifiers operand,
// which is where the encoding stores it.
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  // Count how many src operands this opcode actually has (1..3).
  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  // Bit SrcNum of op_sel selects the destination half.
  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}
5435
Sam Koltona3ec5c12016-10-07 14:46:06 +00005436static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
5437 // 1. This operand is input modifiers
5438 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
5439 // 2. This is not last operand
5440 && Desc.NumOperands > (OpNum + 1)
5441 // 3. Next operand is register class
5442 && Desc.OpInfo[OpNum + 1].RegClass != -1
5443 // 4. Next register is not tied to any other operand
5444 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
5445}
5446
// Convert parsed VOP3 interpolation operands into an MCInst: defs first,
// then sources (interp slot/attr/chan tokens become plain immediates),
// then the optional high/clamp/omod modifiers the opcode supports.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source preceded by an input-modifiers slot: emit mods + value.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      // Interp tokens carry their encoding in Imm.Val.
      Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  // Only add the optional modifiers the opcode actually defines.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}
5485
// Core VOP3 conversion: emit defs, then sources (with or without input
// modifiers depending on whether the opcode has src0_modifiers), then the
// optional clamp/omod operands. Records parsed optional immediates in
// \p OptionalIdx so callers (e.g. cvtVOP3P) can emit more of them.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        // Emit the modifiers operand and the source value together.
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // Special case v_mac_{f16, f32} and v_fmac_f32 (gfx906):
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0.
  if (Opc == AMDGPU::V_MAC_F32_e64_si ||
      Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi ||
      Opc == AMDGPU::V_FMAC_F32_e64_vi) {
    // Splice "src2_modifiers = 0, src2 = dst" into the operand list at the
    // position the instruction description expects.
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
5545
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005546void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005547 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00005548 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00005549}
5550
// Convert a VOP3P instruction: do the normal VOP3 conversion first, then
// emit op_sel/op_sel_hi/neg_lo/neg_hi as operands, and finally fold their
// per-source bits into each source's src*_modifiers operand, which is where
// the encoding actually keeps them.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    // vdst_in is tied to vdst: re-add the def as the input operand.
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    // Packed ops default op_sel_hi to all-ones (-1), unpacked to 0.
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  // Read back the just-emitted modifier masks (bit J belongs to source J).
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // OR each source's bits into its src*_modifiers operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
5633
Sam Koltondfa29f72016-03-09 12:29:31 +00005634//===----------------------------------------------------------------------===//
5635// dpp
5636//===----------------------------------------------------------------------===//
5637
5638bool AMDGPUOperand::isDPPCtrl() const {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005639 using namespace AMDGPU::DPP;
5640
Sam Koltondfa29f72016-03-09 12:29:31 +00005641 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
5642 if (result) {
5643 int64_t Imm = getImm();
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005644 return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
5645 (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
5646 (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
5647 (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
5648 (Imm == DppCtrl::WAVE_SHL1) ||
5649 (Imm == DppCtrl::WAVE_ROL1) ||
5650 (Imm == DppCtrl::WAVE_SHR1) ||
5651 (Imm == DppCtrl::WAVE_ROR1) ||
5652 (Imm == DppCtrl::ROW_MIRROR) ||
5653 (Imm == DppCtrl::ROW_HALF_MIRROR) ||
5654 (Imm == DppCtrl::BCAST15) ||
5655 (Imm == DppCtrl::BCAST31);
Sam Koltondfa29f72016-03-09 12:29:31 +00005656 }
5657 return false;
5658}
5659
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00005660bool AMDGPUOperand::isS16Imm() const {
5661 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
5662}
5663
5664bool AMDGPUOperand::isU16Imm() const {
5665 return isImm() && isUInt<16>(getImm());
5666}
5667
// Parse a dpp_ctrl specifier into a single ImmTyDppCtrl operand.
//
// Accepted forms:
//   row_mirror / row_half_mirror            (bare keywords)
//   quad_perm:[a,b,c,d]                     (each lane selector in 0..3)
//   row_shl:N / row_shr:N / row_ror:N       (N in 1..15)
//   wave_shl:1 / wave_rol:1 / wave_shr:1 / wave_ror:1
//   row_bcast:15 / row_bcast:31
// The textual form is folded into the DppCtrl encoding from AMDGPU::DPP.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  using namespace AMDGPU::DPP;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = DppCtrl::ROW_MIRROR;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = DppCtrl::ROW_HALF_MIRROR;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();
    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Map "prefix:value" onto the DppCtrl encoding, rejecting values
      // outside each control's legal range.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHL0;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_SHR0;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= DppCtrl::ROW_ROR0;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = DppCtrl::WAVE_SHL1;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = DppCtrl::WAVE_ROL1;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = DppCtrl::WAVE_SHR1;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = DppCtrl::WAVE_ROR1;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = DppCtrl::BCAST15;
        } else if (Int == 31) {
          Int = DppCtrl::BCAST31;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
5768
Sam Kolton5f10a132016-05-06 11:31:17 +00005769AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005770 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005771}
5772
David Stuttard20ea21c2019-03-12 09:52:58 +00005773AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
5774 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
5775}
5776
Sam Kolton5f10a132016-05-06 11:31:17 +00005777AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005778 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00005779}
5780
Sam Kolton5f10a132016-05-06 11:31:17 +00005781AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005782 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00005783}
5784
5785void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00005786 OptionalImmIndexMap OptionalIdx;
5787
5788 unsigned I = 1;
5789 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
5790 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
5791 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
5792 }
5793
5794 for (unsigned E = Operands.size(); I != E; ++I) {
Valery Pykhtin3d9afa22018-11-30 14:21:56 +00005795 auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
5796 MCOI::TIED_TO);
5797 if (TiedTo != -1) {
5798 assert((unsigned)TiedTo < Inst.getNumOperands());
5799 // handle tied old or src2 for MAC instructions
5800 Inst.addOperand(Inst.getOperand(TiedTo));
5801 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005802 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
5803 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00005804 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00005805 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00005806 // Skip it.
5807 continue;
5808 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00005809 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00005810 } else if (Op.isDPPCtrl()) {
5811 Op.addImmOperands(Inst, 1);
5812 } else if (Op.isImm()) {
5813 // Handle optional arguments
5814 OptionalIdx[Op.getImmTy()] = I;
5815 } else {
5816 llvm_unreachable("Invalid operand type");
5817 }
5818 }
5819
Sam Koltondfa29f72016-03-09 12:29:31 +00005820 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
5821 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
5822 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
5823}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00005824
Sam Kolton3025e7f2016-04-26 13:33:56 +00005825//===----------------------------------------------------------------------===//
5826// sdwa
5827//===----------------------------------------------------------------------===//
5828
Alex Bradbury58eba092016-11-01 16:32:05 +00005829OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00005830AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
5831 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005832 using namespace llvm::AMDGPU::SDWA;
5833
Sam Kolton3025e7f2016-04-26 13:33:56 +00005834 SMLoc S = Parser.getTok().getLoc();
5835 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005836 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00005837
Sam Kolton05ef1c92016-06-03 10:27:37 +00005838 res = parseStringWithPrefix(Prefix, Value);
5839 if (res != MatchOperand_Success) {
5840 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005841 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005842
Sam Kolton3025e7f2016-04-26 13:33:56 +00005843 int64_t Int;
5844 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005845 .Case("BYTE_0", SdwaSel::BYTE_0)
5846 .Case("BYTE_1", SdwaSel::BYTE_1)
5847 .Case("BYTE_2", SdwaSel::BYTE_2)
5848 .Case("BYTE_3", SdwaSel::BYTE_3)
5849 .Case("WORD_0", SdwaSel::WORD_0)
5850 .Case("WORD_1", SdwaSel::WORD_1)
5851 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005852 .Default(0xffffffff);
5853 Parser.Lex(); // eat last token
5854
5855 if (Int == 0xffffffff) {
5856 return MatchOperand_ParseFail;
5857 }
5858
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005859 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005860 return MatchOperand_Success;
5861}
5862
Alex Bradbury58eba092016-11-01 16:32:05 +00005863OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00005864AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00005865 using namespace llvm::AMDGPU::SDWA;
5866
Sam Kolton3025e7f2016-04-26 13:33:56 +00005867 SMLoc S = Parser.getTok().getLoc();
5868 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00005869 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00005870
5871 res = parseStringWithPrefix("dst_unused", Value);
5872 if (res != MatchOperand_Success) {
5873 return res;
5874 }
5875
5876 int64_t Int;
5877 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00005878 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
5879 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
5880 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00005881 .Default(0xffffffff);
5882 Parser.Lex(); // eat last token
5883
5884 if (Int == 0xffffffff) {
5885 return MatchOperand_ParseFail;
5886 }
5887
Sam Kolton1eeb11b2016-09-09 14:44:04 +00005888 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00005889 return MatchOperand_Success;
5890}
5891
// SDWA conversion for VOP1-shaped instructions; defers to cvtSDWA with the
// VOP1 flavor (no vcc skipping).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
5895
// SDWA conversion for VOP2-shaped instructions; defers to cvtSDWA with the
// VOP2 flavor (no vcc skipping).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
5899
// SDWA conversion for VOP2b-shaped instructions (carry in/out): uses the VOP2
// flavor but asks cvtSDWA to skip the textual "vcc" operands.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
5903
// SDWA conversion for VOPC-shaped instructions; the textual "vcc" destination
// is skipped only on VI (on GFX9 it is encoded explicitly).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
5907
/// Common SDWA operand conversion.
///
/// Emits the def registers, copies the remaining parsed operands into
/// \p Inst (optionally skipping a textual "vcc" when \p skipVcc is set, as
/// used by VOP2b and VI VOPC forms), then appends defaults for any omitted
/// optional sdwa modifiers (clamp, omod where present, dst_sel, dst_unused,
/// src0_sel, src1_sel) according to \p BasicInstType. Finally duplicates the
/// dst operand as the tied src2 for v_mac_f16/f32.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  // Operands[0] is the mnemonic token; start at 1 with the def registers.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      // The position is detected via Inst.getNumOperands(): 1 def already
      // emitted (VOP2) or none yet (VOPC).
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_vi/gfx9 have no optional sdwa arguments.
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // omod exists only on some opcodes; add it only when present.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // Special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand.
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00005995
Tom Stellard45bb48e2015-06-13 03:28:10 +00005996/// Force static initialization.
5997extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00005998 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
5999 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00006000}
6001
6002#define GET_REGISTER_MATCHER
6003#define GET_MATCHER_IMPLEMENTATION
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00006004#define GET_MNEMONIC_SPELL_CHECKER
Tom Stellard45bb48e2015-06-13 03:28:10 +00006005#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00006006
Sam Kolton11de3702016-05-24 12:38:33 +00006007// This fuction should be defined after auto-generated include so that we have
6008// MatchClassKind enum defined
6009unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
6010 unsigned Kind) {
6011 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00006012 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00006013 // operand. This method checks if we are given immediate operand but expect to
6014 // get corresponding token.
6015 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
6016 switch (Kind) {
6017 case MCK_addr64:
6018 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
6019 case MCK_gds:
6020 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
Dmitry Preobrazhenskyd6e1a942018-02-21 13:13:48 +00006021 case MCK_lds:
6022 return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00006023 case MCK_glc:
6024 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
6025 case MCK_idxen:
6026 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
6027 case MCK_offen:
6028 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006029 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00006030 // When operands have expression values, they will return true for isToken,
6031 // because it is not possible to distinguish between a token and an
6032 // expression at parse time. MatchInstructionImpl() will always try to
6033 // match an operand as a token, when isToken returns true, and when the
6034 // name of the expression is not a valid token, the match will fail,
6035 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00006036 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
6037 case MCK_SSrcF32:
6038 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00006039 case MCK_SoppBrTarget:
6040 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00006041 case MCK_VReg32OrOff:
6042 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00006043 case MCK_InterpSlot:
6044 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
6045 case MCK_Attr:
6046 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
6047 case MCK_AttrChan:
6048 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00006049 default:
6050 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00006051 }
6052}
David Stuttard20ea21c2019-03-12 09:52:58 +00006053
6054//===----------------------------------------------------------------------===//
6055// endpgm
6056//===----------------------------------------------------------------------===//
6057
6058OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
6059 SMLoc S = Parser.getTok().getLoc();
6060 int64_t Imm = 0;
6061
6062 if (!parseExpr(Imm)) {
6063 // The operand is optional, if not present default to 0
6064 Imm = 0;
6065 }
6066
6067 if (!isUInt<16>(Imm)) {
6068 Error(S, "expected a 16-bit value");
6069 return MatchOperand_ParseFail;
6070 }
6071
6072 Operands.push_back(
6073 AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
6074 return MatchOperand_Success;
6075}
6076
6077bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }