blob: 54df9728db9ef354317e60bbfbd0de993628169e [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000014#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000015#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000016#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000018#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000019#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000020#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000021#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000022#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000023#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000026#include "llvm/BinaryFormat/ELF.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000027#include "llvm/CodeGen/MachineValueType.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000028#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000032#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000033#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCParser/MCAsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000036#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000038#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000042#include "llvm/MC/MCSymbol.h"
43#include "llvm/Support/Casting.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000044#include "llvm/Support/ErrorHandling.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000045#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/SMLoc.h"
47#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000048#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000049#include <algorithm>
50#include <cassert>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <map>
55#include <memory>
56#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000057
Tom Stellard45bb48e2015-06-13 03:28:10 +000058using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000059using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000060
61namespace {
62
Sam Kolton1eeb11b2016-09-09 14:44:04 +000063class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000064
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000065enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
66
Sam Kolton1eeb11b2016-09-09 14:44:04 +000067//===----------------------------------------------------------------------===//
68// Operand
69//===----------------------------------------------------------------------===//
70
Tom Stellard45bb48e2015-06-13 03:28:10 +000071class AMDGPUOperand : public MCParsedAsmOperand {
72 enum KindTy {
73 Token,
74 Immediate,
75 Register,
76 Expression
77 } Kind;
78
79 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000080 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000081
82public:
Matt Arsenaultf15da6c2017-02-03 20:49:51 +000083 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
Sam Kolton1eeb11b2016-09-09 14:44:04 +000084 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000085
Sam Kolton5f10a132016-05-06 11:31:17 +000086 typedef std::unique_ptr<AMDGPUOperand> Ptr;
87
Sam Kolton945231a2016-06-10 09:57:59 +000088 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000089 bool Abs = false;
90 bool Neg = false;
91 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000092
93 bool hasFPModifiers() const { return Abs || Neg; }
94 bool hasIntModifiers() const { return Sext; }
95 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
96
97 int64_t getFPModifiersOperand() const {
98 int64_t Operand = 0;
99 Operand |= Abs ? SISrcMods::ABS : 0;
100 Operand |= Neg ? SISrcMods::NEG : 0;
101 return Operand;
102 }
103
104 int64_t getIntModifiersOperand() const {
105 int64_t Operand = 0;
106 Operand |= Sext ? SISrcMods::SEXT : 0;
107 return Operand;
108 }
109
110 int64_t getModifiersOperand() const {
111 assert(!(hasFPModifiers() && hasIntModifiers())
112 && "fp and int modifiers should not be used simultaneously");
113 if (hasFPModifiers()) {
114 return getFPModifiersOperand();
115 } else if (hasIntModifiers()) {
116 return getIntModifiersOperand();
117 } else {
118 return 0;
119 }
120 }
121
122 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
123 };
124
Tom Stellard45bb48e2015-06-13 03:28:10 +0000125 enum ImmTy {
126 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000128 ImmTyOffen,
129 ImmTyIdxen,
130 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000132 ImmTyOffset0,
133 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134 ImmTyGLC,
135 ImmTySLC,
136 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000137 ImmTyClampSI,
138 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000139 ImmTyDppCtrl,
140 ImmTyDppRowMask,
141 ImmTyDppBankMask,
142 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000143 ImmTySdwaDstSel,
144 ImmTySdwaSrc0Sel,
145 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000146 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000147 ImmTyDMask,
148 ImmTyUNorm,
149 ImmTyDA,
150 ImmTyR128,
151 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000152 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000153 ImmTyExpCompr,
154 ImmTyExpVM,
Artem Tamazovd6468662016-04-25 14:13:51 +0000155 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000156 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000157 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000158 ImmTyInterpSlot,
159 ImmTyInterpAttr,
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000160 ImmTyAttrChan,
161 ImmTyOpSel,
162 ImmTyOpSelHi,
163 ImmTyNegLo,
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000164 ImmTyNegHi,
165 ImmTySwizzle
Tom Stellard45bb48e2015-06-13 03:28:10 +0000166 };
167
168 struct TokOp {
169 const char *Data;
170 unsigned Length;
171 };
172
173 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000174 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000175 ImmTy Type;
176 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000177 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000178 };
179
180 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000181 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000182 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000183 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000184 };
185
186 union {
187 TokOp Tok;
188 ImmOp Imm;
189 RegOp Reg;
190 const MCExpr *Expr;
191 };
192
Tom Stellard45bb48e2015-06-13 03:28:10 +0000193 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000194 if (Kind == Token)
195 return true;
196
197 if (Kind != Expression || !Expr)
198 return false;
199
200 // When parsing operands, we can't always tell if something was meant to be
201 // a token, like 'gds', or an expression that references a global variable.
202 // In this case, we assume the string is an expression, and if we need to
203 // interpret is a token, then we treat the symbol name as the token.
204 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000205 }
206
207 bool isImm() const override {
208 return Kind == Immediate;
209 }
210
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000211 bool isInlinableImm(MVT type) const;
212 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000213
Tom Stellard45bb48e2015-06-13 03:28:10 +0000214 bool isRegKind() const {
215 return Kind == Register;
216 }
217
218 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000219 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000220 }
221
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000222 bool isRegOrImmWithInputMods(MVT type) const {
223 return isRegKind() || isInlinableImm(type);
224 }
225
Matt Arsenault4bd72362016-12-10 00:39:12 +0000226 bool isRegOrImmWithInt16InputMods() const {
227 return isRegOrImmWithInputMods(MVT::i16);
228 }
229
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000230 bool isRegOrImmWithInt32InputMods() const {
231 return isRegOrImmWithInputMods(MVT::i32);
232 }
233
234 bool isRegOrImmWithInt64InputMods() const {
235 return isRegOrImmWithInputMods(MVT::i64);
236 }
237
Matt Arsenault4bd72362016-12-10 00:39:12 +0000238 bool isRegOrImmWithFP16InputMods() const {
239 return isRegOrImmWithInputMods(MVT::f16);
240 }
241
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000242 bool isRegOrImmWithFP32InputMods() const {
243 return isRegOrImmWithInputMods(MVT::f32);
244 }
245
246 bool isRegOrImmWithFP64InputMods() const {
247 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000248 }
249
Sam Kolton9772eb32017-01-11 11:46:30 +0000250 bool isVReg() const {
251 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
252 isRegClass(AMDGPU::VReg_64RegClassID) ||
253 isRegClass(AMDGPU::VReg_96RegClassID) ||
254 isRegClass(AMDGPU::VReg_128RegClassID) ||
255 isRegClass(AMDGPU::VReg_256RegClassID) ||
256 isRegClass(AMDGPU::VReg_512RegClassID);
257 }
258
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000259 bool isVReg32OrOff() const {
260 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
261 }
262
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000263 bool isImmTy(ImmTy ImmT) const {
264 return isImm() && Imm.Type == ImmT;
265 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000266
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000267 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000268 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000269 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000270
Sam Kolton945231a2016-06-10 09:57:59 +0000271 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
272 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
273 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000274 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
275 bool isDA() const { return isImmTy(ImmTyDA); }
276 bool isR128() const { return isImmTy(ImmTyUNorm); }
277 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000278 bool isOff() const { return isImmTy(ImmTyOff); }
279 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000280 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
281 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000282 bool isOffen() const { return isImmTy(ImmTyOffen); }
283 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
284 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
285 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
286 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
287 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Matt Arsenaultfd023142017-06-12 15:55:58 +0000288
289 bool isOffsetU12() const { return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); }
290 bool isOffsetS13() const { return isImmTy(ImmTyOffset) && isInt<13>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000291 bool isGDS() const { return isImmTy(ImmTyGDS); }
292 bool isGLC() const { return isImmTy(ImmTyGLC); }
293 bool isSLC() const { return isImmTy(ImmTySLC); }
294 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000295 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
296 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
297 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
298 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
299 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
300 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
301 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000302 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
303 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
304 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000305 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
306 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
307 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
308 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000309
Sam Kolton945231a2016-06-10 09:57:59 +0000310 bool isMod() const {
311 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000312 }
313
314 bool isRegOrImm() const {
315 return isReg() || isImm();
316 }
317
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000318 bool isRegClass(unsigned RCID) const;
319
Sam Kolton9772eb32017-01-11 11:46:30 +0000320 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
321 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
322 }
323
Matt Arsenault4bd72362016-12-10 00:39:12 +0000324 bool isSCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000325 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000326 }
327
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000328 bool isSCSrcV2B16() const {
329 return isSCSrcB16();
330 }
331
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000332 bool isSCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000333 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000334 }
335
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000336 bool isSCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000337 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000338 }
339
Matt Arsenault4bd72362016-12-10 00:39:12 +0000340 bool isSCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000341 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000342 }
343
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000344 bool isSCSrcV2F16() const {
345 return isSCSrcF16();
346 }
347
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000348 bool isSCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000349 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000350 }
351
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000352 bool isSCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000353 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000354 }
355
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000356 bool isSSrcB32() const {
357 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
358 }
359
Matt Arsenault4bd72362016-12-10 00:39:12 +0000360 bool isSSrcB16() const {
361 return isSCSrcB16() || isLiteralImm(MVT::i16);
362 }
363
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000364 bool isSSrcV2B16() const {
365 llvm_unreachable("cannot happen");
366 return isSSrcB16();
367 }
368
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000369 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000370 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
371 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000372 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000373 }
374
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000375 bool isSSrcF32() const {
376 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000377 }
378
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000379 bool isSSrcF64() const {
380 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000381 }
382
Matt Arsenault4bd72362016-12-10 00:39:12 +0000383 bool isSSrcF16() const {
384 return isSCSrcB16() || isLiteralImm(MVT::f16);
385 }
386
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000387 bool isSSrcV2F16() const {
388 llvm_unreachable("cannot happen");
389 return isSSrcF16();
390 }
391
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000392 bool isVCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000393 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000394 }
395
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000396 bool isVCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000397 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000398 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000399
Matt Arsenault4bd72362016-12-10 00:39:12 +0000400 bool isVCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000401 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000402 }
403
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000404 bool isVCSrcV2B16() const {
405 return isVCSrcB16();
406 }
407
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000408 bool isVCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000409 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000410 }
411
412 bool isVCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000413 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000414 }
415
Matt Arsenault4bd72362016-12-10 00:39:12 +0000416 bool isVCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000417 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000418 }
419
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000420 bool isVCSrcV2F16() const {
421 return isVCSrcF16();
422 }
423
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000424 bool isVSrcB32() const {
425 return isVCSrcF32() || isLiteralImm(MVT::i32);
426 }
427
428 bool isVSrcB64() const {
429 return isVCSrcF64() || isLiteralImm(MVT::i64);
430 }
431
Matt Arsenault4bd72362016-12-10 00:39:12 +0000432 bool isVSrcB16() const {
433 return isVCSrcF16() || isLiteralImm(MVT::i16);
434 }
435
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000436 bool isVSrcV2B16() const {
437 llvm_unreachable("cannot happen");
438 return isVSrcB16();
439 }
440
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000441 bool isVSrcF32() const {
442 return isVCSrcF32() || isLiteralImm(MVT::f32);
443 }
444
445 bool isVSrcF64() const {
446 return isVCSrcF64() || isLiteralImm(MVT::f64);
447 }
448
Matt Arsenault4bd72362016-12-10 00:39:12 +0000449 bool isVSrcF16() const {
450 return isVCSrcF16() || isLiteralImm(MVT::f16);
451 }
452
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000453 bool isVSrcV2F16() const {
454 llvm_unreachable("cannot happen");
455 return isVSrcF16();
456 }
457
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000458 bool isKImmFP32() const {
459 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000460 }
461
Matt Arsenault4bd72362016-12-10 00:39:12 +0000462 bool isKImmFP16() const {
463 return isLiteralImm(MVT::f16);
464 }
465
Tom Stellard45bb48e2015-06-13 03:28:10 +0000466 bool isMem() const override {
467 return false;
468 }
469
470 bool isExpr() const {
471 return Kind == Expression;
472 }
473
474 bool isSoppBrTarget() const {
475 return isExpr() || isImm();
476 }
477
Sam Kolton945231a2016-06-10 09:57:59 +0000478 bool isSWaitCnt() const;
479 bool isHwreg() const;
480 bool isSendMsg() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000481 bool isSwizzle() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000482 bool isSMRDOffset8() const;
483 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000484 bool isSMRDLiteralOffset() const;
485 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000486 bool isGPRIdxMode() const;
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +0000487 bool isS16Imm() const;
488 bool isU16Imm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000489
Tom Stellard89049702016-06-15 02:54:14 +0000490 StringRef getExpressionAsToken() const {
491 assert(isExpr());
492 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
493 return S->getSymbol().getName();
494 }
495
Sam Kolton945231a2016-06-10 09:57:59 +0000496 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000497 assert(isToken());
498
499 if (Kind == Expression)
500 return getExpressionAsToken();
501
Sam Kolton945231a2016-06-10 09:57:59 +0000502 return StringRef(Tok.Data, Tok.Length);
503 }
504
505 int64_t getImm() const {
506 assert(isImm());
507 return Imm.Val;
508 }
509
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000510 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000511 assert(isImm());
512 return Imm.Type;
513 }
514
515 unsigned getReg() const override {
516 return Reg.RegNo;
517 }
518
Tom Stellard45bb48e2015-06-13 03:28:10 +0000519 SMLoc getStartLoc() const override {
520 return StartLoc;
521 }
522
Peter Collingbourne0da86302016-10-10 22:49:37 +0000523 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000524 return EndLoc;
525 }
526
Sam Kolton945231a2016-06-10 09:57:59 +0000527 Modifiers getModifiers() const {
528 assert(isRegKind() || isImmTy(ImmTyNone));
529 return isRegKind() ? Reg.Mods : Imm.Mods;
530 }
531
532 void setModifiers(Modifiers Mods) {
533 assert(isRegKind() || isImmTy(ImmTyNone));
534 if (isRegKind())
535 Reg.Mods = Mods;
536 else
537 Imm.Mods = Mods;
538 }
539
540 bool hasModifiers() const {
541 return getModifiers().hasModifiers();
542 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000543
Sam Kolton945231a2016-06-10 09:57:59 +0000544 bool hasFPModifiers() const {
545 return getModifiers().hasFPModifiers();
546 }
547
548 bool hasIntModifiers() const {
549 return getModifiers().hasIntModifiers();
550 }
551
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000552 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
553
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000554 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000555
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000556 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000557
Matt Arsenault4bd72362016-12-10 00:39:12 +0000558 template <unsigned Bitwidth>
559 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
560
561 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
562 addKImmFPOperands<16>(Inst, N);
563 }
564
565 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
566 addKImmFPOperands<32>(Inst, N);
567 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000568
569 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000570
571 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
572 if (isRegKind())
573 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000574 else if (isExpr())
575 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000576 else
577 addImmOperands(Inst, N);
578 }
579
580 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
581 Modifiers Mods = getModifiers();
582 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
583 if (isRegKind()) {
584 addRegOperands(Inst, N);
585 } else {
586 addImmOperands(Inst, N, false);
587 }
588 }
589
590 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
591 assert(!hasIntModifiers());
592 addRegOrImmWithInputModsOperands(Inst, N);
593 }
594
595 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
596 assert(!hasFPModifiers());
597 addRegOrImmWithInputModsOperands(Inst, N);
598 }
599
Sam Kolton9772eb32017-01-11 11:46:30 +0000600 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
601 Modifiers Mods = getModifiers();
602 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
603 assert(isRegKind());
604 addRegOperands(Inst, N);
605 }
606
607 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
608 assert(!hasIntModifiers());
609 addRegWithInputModsOperands(Inst, N);
610 }
611
612 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
613 assert(!hasFPModifiers());
614 addRegWithInputModsOperands(Inst, N);
615 }
616
Sam Kolton945231a2016-06-10 09:57:59 +0000617 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
618 if (isImm())
619 addImmOperands(Inst, N);
620 else {
621 assert(isExpr());
622 Inst.addOperand(MCOperand::createExpr(Expr));
623 }
624 }
625
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000626 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000627 switch (Type) {
628 case ImmTyNone: OS << "None"; break;
629 case ImmTyGDS: OS << "GDS"; break;
630 case ImmTyOffen: OS << "Offen"; break;
631 case ImmTyIdxen: OS << "Idxen"; break;
632 case ImmTyAddr64: OS << "Addr64"; break;
633 case ImmTyOffset: OS << "Offset"; break;
634 case ImmTyOffset0: OS << "Offset0"; break;
635 case ImmTyOffset1: OS << "Offset1"; break;
636 case ImmTyGLC: OS << "GLC"; break;
637 case ImmTySLC: OS << "SLC"; break;
638 case ImmTyTFE: OS << "TFE"; break;
639 case ImmTyClampSI: OS << "ClampSI"; break;
640 case ImmTyOModSI: OS << "OModSI"; break;
641 case ImmTyDppCtrl: OS << "DppCtrl"; break;
642 case ImmTyDppRowMask: OS << "DppRowMask"; break;
643 case ImmTyDppBankMask: OS << "DppBankMask"; break;
644 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000645 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
646 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
647 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000648 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
649 case ImmTyDMask: OS << "DMask"; break;
650 case ImmTyUNorm: OS << "UNorm"; break;
651 case ImmTyDA: OS << "DA"; break;
652 case ImmTyR128: OS << "R128"; break;
653 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000654 case ImmTyOff: OS << "Off"; break;
655 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000656 case ImmTyExpCompr: OS << "ExpCompr"; break;
657 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000658 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000659 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000660 case ImmTyInterpSlot: OS << "InterpSlot"; break;
661 case ImmTyInterpAttr: OS << "InterpAttr"; break;
662 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000663 case ImmTyOpSel: OS << "OpSel"; break;
664 case ImmTyOpSelHi: OS << "OpSelHi"; break;
665 case ImmTyNegLo: OS << "NegLo"; break;
666 case ImmTyNegHi: OS << "NegHi"; break;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000667 case ImmTySwizzle: OS << "Swizzle"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000668 }
669 }
670
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000671 void print(raw_ostream &OS) const override {
672 switch (Kind) {
673 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000674 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000675 break;
676 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000677 OS << '<' << getImm();
678 if (getImmTy() != ImmTyNone) {
679 OS << " type: "; printImmTy(OS, getImmTy());
680 }
Sam Kolton945231a2016-06-10 09:57:59 +0000681 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000682 break;
683 case Token:
684 OS << '\'' << getToken() << '\'';
685 break;
686 case Expression:
687 OS << "<expr " << *Expr << '>';
688 break;
689 }
690 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000691
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000692 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
693 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000694 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000695 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000696 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000697 Op->Imm.Val = Val;
698 Op->Imm.IsFPImm = IsFPImm;
699 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000700 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000701 Op->StartLoc = Loc;
702 Op->EndLoc = Loc;
703 return Op;
704 }
705
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000706 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
707 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000708 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000709 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000710 Res->Tok.Data = Str.data();
711 Res->Tok.Length = Str.size();
712 Res->StartLoc = Loc;
713 Res->EndLoc = Loc;
714 return Res;
715 }
716
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000717 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
718 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000719 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000720 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000721 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000722 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000723 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000724 Op->Reg.IsForcedVOP3 = ForceVOP3;
725 Op->StartLoc = S;
726 Op->EndLoc = E;
727 return Op;
728 }
729
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000730 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
731 const class MCExpr *Expr, SMLoc S) {
732 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000733 Op->Expr = Expr;
734 Op->StartLoc = S;
735 Op->EndLoc = S;
736 return Op;
737 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000738};
739
Sam Kolton945231a2016-06-10 09:57:59 +0000740raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
741 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
742 return OS;
743}
744
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000745//===----------------------------------------------------------------------===//
746// AsmParser
747//===----------------------------------------------------------------------===//
748
Artem Tamazova01cce82016-12-27 16:00:11 +0000749// Holds info related to the current kernel, e.g. count of SGPRs used.
750// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
751// .amdgpu_hsa_kernel or at EOF.
752class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000753 int SgprIndexUnusedMin = -1;
754 int VgprIndexUnusedMin = -1;
755 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000756
757 void usesSgprAt(int i) {
758 if (i >= SgprIndexUnusedMin) {
759 SgprIndexUnusedMin = ++i;
760 if (Ctx) {
761 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
762 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
763 }
764 }
765 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000766
Artem Tamazova01cce82016-12-27 16:00:11 +0000767 void usesVgprAt(int i) {
768 if (i >= VgprIndexUnusedMin) {
769 VgprIndexUnusedMin = ++i;
770 if (Ctx) {
771 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
772 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
773 }
774 }
775 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000776
Artem Tamazova01cce82016-12-27 16:00:11 +0000777public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000778 KernelScopeInfo() = default;
779
Artem Tamazova01cce82016-12-27 16:00:11 +0000780 void initialize(MCContext &Context) {
781 Ctx = &Context;
782 usesSgprAt(SgprIndexUnusedMin = -1);
783 usesVgprAt(VgprIndexUnusedMin = -1);
784 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000785
Artem Tamazova01cce82016-12-27 16:00:11 +0000786 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
787 switch (RegKind) {
788 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
789 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
790 default: break;
791 }
792 }
793};
794
/// Target assembly parser for AMDGPU (SOUTHERN_ISLANDS and newer).
/// Parses instructions and operands, handles target directives
/// (.hsa_code_object_*, .amd_kernel_code_t, .amdgpu_hsa_kernel, metadata),
/// and converts parsed operand lists into MCInsts.
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // State forced by an explicit mnemonic suffix (e.g. "_e64", "_dpp",
  // "_sdwa") rather than deduced from the operands.
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseDirectiveCodeObjectMetadata();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  // Maps an optional-operand immediate kind to its index in the parsed
  // operand list.
  typedef std::map<AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // Publish the ISA version as assembler-visible symbols.
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaInfo::IsaVersion ISA =
          AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym =
          Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
    }
    KernelScope.initialize(getContext());
  }

  // Subtarget generation queries.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  // Accessors for the forced-encoding state set from mnemonic suffixes.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  // Core MCTargetAsmParser entry points.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  // Helpers for "prefix:value"-style operands.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  // Immediate / register operand parsing (optionally with abs()/neg()/sext()
  // input modifiers).
  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  // DS / export instruction conversion.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Parsed id that may have been given symbolically or numerically.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-match validation (e.g. constant-bus usage restrictions).
  bool validateOperandLimitations(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  // Low-level token-stream helpers.
  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  bool parseExpr(int64_t &Imm);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // swizzle() operand parsing.
  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  // MUBUF conversion and defaults for its optional operands.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  // Defaults for MIMG / SMRD / FLAT optional operands.
  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // VOP3 conversion.
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Impl(MCInst &Inst,
                   const OperandVector &Operands,
                   OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3OMod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP parsing/conversion and defaults.
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA parsing/conversion.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);
};
1078
// Table entry describing one optional instruction operand that may appear in
// the assembly text.
struct OptionalOperand {
  const char *Name;                // Textual name/prefix of the operand.
  AMDGPUOperand::ImmTy Type;       // Immediate kind used for the built operand.
  bool IsBit;                      // Presumably: operand is a bare flag with no
                                   // ":value" part (cf. parseNamedBit) — confirm.
  bool (*ConvertResult)(int64_t&); // Optional value conversion; may be null,
                                   // as with parseIntWithPrefix's parameter.
};
1085
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001086} // end anonymous namespace
1087
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001088// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001089static const fltSemantics *getFltSemantics(unsigned Size) {
1090 switch (Size) {
1091 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001092 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001093 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001094 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001095 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001096 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001097 default:
1098 llvm_unreachable("unsupported fp type");
1099 }
1100}
1101
Matt Arsenault4bd72362016-12-10 00:39:12 +00001102static const fltSemantics *getFltSemantics(MVT VT) {
1103 return getFltSemantics(VT.getSizeInBits() / 8);
1104}
1105
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001106static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1107 switch (OperandType) {
1108 case AMDGPU::OPERAND_REG_IMM_INT32:
1109 case AMDGPU::OPERAND_REG_IMM_FP32:
1110 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1111 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1112 return &APFloat::IEEEsingle();
1113 case AMDGPU::OPERAND_REG_IMM_INT64:
1114 case AMDGPU::OPERAND_REG_IMM_FP64:
1115 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1116 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1117 return &APFloat::IEEEdouble();
1118 case AMDGPU::OPERAND_REG_IMM_INT16:
1119 case AMDGPU::OPERAND_REG_IMM_FP16:
1120 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1121 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1122 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1123 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1124 return &APFloat::IEEEhalf();
1125 default:
1126 llvm_unreachable("unsupported fp type");
1127 }
1128}
1129
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001130//===----------------------------------------------------------------------===//
1131// Operand
1132//===----------------------------------------------------------------------===//
1133
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001134static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1135 bool Lost;
1136
1137 // Convert literal to single precision
1138 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1139 APFloat::rmNearestTiesToEven,
1140 &Lost);
1141 // We allow precision lost but not overflow or underflow
1142 if (Status != APFloat::opOK &&
1143 Lost &&
1144 ((Status & APFloat::opOverflow) != 0 ||
1145 (Status & APFloat::opUnderflow) != 0)) {
1146 return false;
1147 }
1148
1149 return true;
1150}
1151
// Return true if this immediate can be encoded as an inline constant for an
// operand whose expected type is 'type'.  64-bit operands are checked against
// the raw 64-bit value; narrower operands first require a lossless conversion
// of FP tokens, then check the low 16/32 bits.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrow the double-precision token to the operand's semantics; bail out
    // if the value overflows/underflows that type.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // Integer tokens are truncated to the operand width (sign-extended for the
  // 16-bit check) before testing inlinability.
  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1201
// Return true if this immediate can be encoded as a (non-inline) literal
// constant for an operand of the expected type 'type'.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    // 64-bit operands still take a 32-bit literal in the encoding.
    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept such literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower operands: accept the FP token iff it converts without
  // overflow/underflow to the operand's semantics.
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1242
1243bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001244 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001245}
1246
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001247uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1248{
1249 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1250 assert(Size == 2 || Size == 4 || Size == 8);
1251
1252 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1253
1254 if (Imm.Mods.Abs) {
1255 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001256 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001257 if (Imm.Mods.Neg) {
1258 Val ^= FpSignMask;
1259 }
1260
1261 return Val;
1262}
1263
1264void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001265
Matt Arsenault4bd72362016-12-10 00:39:12 +00001266 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1267 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001268 addLiteralImmOperand(Inst, Imm.Val,
1269 ApplyModifiers &
1270 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001271 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001272 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1273 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001274 }
1275}
1276
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001277void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001278 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1279 auto OpNum = Inst.getNumOperands();
1280 // Check that this operand accepts literals
1281 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1282
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001283 if (ApplyModifiers) {
1284 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1285 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1286 Val = applyInputFPModifiers(Val, Size);
1287 }
1288
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001289 APInt Literal(64, Val);
1290 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001291
1292 if (Imm.IsFPImm) { // We got fp literal token
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001293 switch (OpTy) {
1294 case AMDGPU::OPERAND_REG_IMM_INT64:
1295 case AMDGPU::OPERAND_REG_IMM_FP64:
1296 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1297 case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
Matt Arsenault26faed32016-12-05 22:26:17 +00001298 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1299 AsmParser->hasInv2PiInlineImm())) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001300 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001301 return;
1302 }
1303
1304 // Non-inlineable
1305 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001306 // For fp operands we check if low 32 bits are zeros
1307 if (Literal.getLoBits(32) != 0) {
1308 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001309 "Can't encode literal as exact 64-bit floating-point operand. "
1310 "Low 32-bits will be set to zero");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001311 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001312
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001313 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001314 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001315 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001316
1317 // We don't allow fp literals in 64-bit integer instructions. It is
1318 // unclear how we should encode them. This case should be checked earlier
1319 // in predicate methods (isLiteralImm())
1320 llvm_unreachable("fp literal in 64-bit integer instruction.");
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001321 }
1322 case AMDGPU::OPERAND_REG_IMM_INT32:
1323 case AMDGPU::OPERAND_REG_IMM_FP32:
1324 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1325 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1326 case AMDGPU::OPERAND_REG_IMM_INT16:
1327 case AMDGPU::OPERAND_REG_IMM_FP16:
1328 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1329 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1330 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1331 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001332 bool lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001333 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001334 // Convert literal to single precision
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001335 FPLiteral.convert(*getOpFltSemantics(OpTy),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001336 APFloat::rmNearestTiesToEven, &lost);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001337 // We allow precision lost but not overflow or underflow. This should be
1338 // checked earlier in isLiteralImm()
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001339
1340 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
1341 if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
1342 OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
1343 ImmVal |= (ImmVal << 16);
1344 }
1345
1346 Inst.addOperand(MCOperand::createImm(ImmVal));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001347 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001348 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001349 default:
1350 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001351 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001352
1353 return;
1354 }
1355
1356 // We got int literal token.
1357 // Only sign extend inline immediates.
1358 // FIXME: No errors on truncation
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001359 switch (OpTy) {
1360 case AMDGPU::OPERAND_REG_IMM_INT32:
1361 case AMDGPU::OPERAND_REG_IMM_FP32:
1362 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1363 case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001364 if (isInt<32>(Val) &&
1365 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1366 AsmParser->hasInv2PiInlineImm())) {
1367 Inst.addOperand(MCOperand::createImm(Val));
1368 return;
1369 }
1370
1371 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1372 return;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001373 }
1374 case AMDGPU::OPERAND_REG_IMM_INT64:
1375 case AMDGPU::OPERAND_REG_IMM_FP64:
1376 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1377 case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
1378 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001379 Inst.addOperand(MCOperand::createImm(Val));
1380 return;
1381 }
1382
1383 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1384 return;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001385 }
1386 case AMDGPU::OPERAND_REG_IMM_INT16:
1387 case AMDGPU::OPERAND_REG_IMM_FP16:
1388 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1389 case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001390 if (isInt<16>(Val) &&
1391 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1392 AsmParser->hasInv2PiInlineImm())) {
1393 Inst.addOperand(MCOperand::createImm(Val));
1394 return;
1395 }
1396
1397 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1398 return;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001399 }
1400 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1401 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1402 auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
1403 assert(AMDGPU::isInlinableLiteral16(LiteralVal,
1404 AsmParser->hasInv2PiInlineImm()));
Eugene Zelenko66203762017-01-21 00:53:49 +00001405
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001406 uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
1407 static_cast<uint32_t>(LiteralVal);
1408 Inst.addOperand(MCOperand::createImm(ImmVal));
1409 return;
1410 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001411 default:
1412 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001413 }
1414}
1415
Matt Arsenault4bd72362016-12-10 00:39:12 +00001416template <unsigned Bitwidth>
1417void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001418 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001419
1420 if (!Imm.IsFPImm) {
1421 // We got int literal token.
1422 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1423 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001424 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001425
1426 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001427 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001428 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1429 APFloat::rmNearestTiesToEven, &Lost);
1430 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001431}
1432
1433void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1434 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1435}
1436
1437//===----------------------------------------------------------------------===//
1438// AsmParser
1439//===----------------------------------------------------------------------===//
1440
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001441static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1442 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001443 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001444 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001445 case 1: return AMDGPU::VGPR_32RegClassID;
1446 case 2: return AMDGPU::VReg_64RegClassID;
1447 case 3: return AMDGPU::VReg_96RegClassID;
1448 case 4: return AMDGPU::VReg_128RegClassID;
1449 case 8: return AMDGPU::VReg_256RegClassID;
1450 case 16: return AMDGPU::VReg_512RegClassID;
1451 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001452 } else if (Is == IS_TTMP) {
1453 switch (RegWidth) {
1454 default: return -1;
1455 case 1: return AMDGPU::TTMP_32RegClassID;
1456 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001457 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001458 }
1459 } else if (Is == IS_SGPR) {
1460 switch (RegWidth) {
1461 default: return -1;
1462 case 1: return AMDGPU::SGPR_32RegClassID;
1463 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001464 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001465 case 8: return AMDGPU::SReg_256RegClassID;
1466 case 16: return AMDGPU::SReg_512RegClassID;
1467 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001468 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001469 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001470}
1471
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001472static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001473 return StringSwitch<unsigned>(RegName)
1474 .Case("exec", AMDGPU::EXEC)
1475 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001476 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001477 .Case("m0", AMDGPU::M0)
1478 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001479 .Case("tba", AMDGPU::TBA)
1480 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001481 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1482 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001483 .Case("vcc_lo", AMDGPU::VCC_LO)
1484 .Case("vcc_hi", AMDGPU::VCC_HI)
1485 .Case("exec_lo", AMDGPU::EXEC_LO)
1486 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001487 .Case("tma_lo", AMDGPU::TMA_LO)
1488 .Case("tma_hi", AMDGPU::TMA_HI)
1489 .Case("tba_lo", AMDGPU::TBA_LO)
1490 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001491 .Default(0);
1492}
1493
Eugene Zelenko66203762017-01-21 00:53:49 +00001494bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1495 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001496 auto R = parseRegister();
1497 if (!R) return true;
1498 assert(R->isReg());
1499 RegNo = R->getReg();
1500 StartLoc = R->getStartLoc();
1501 EndLoc = R->getEndLoc();
1502 return false;
1503}
1504
Eugene Zelenko66203762017-01-21 00:53:49 +00001505bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1506 RegisterKind RegKind, unsigned Reg1,
1507 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001508 switch (RegKind) {
1509 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001510 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1511 Reg = AMDGPU::EXEC;
1512 RegWidth = 2;
1513 return true;
1514 }
1515 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1516 Reg = AMDGPU::FLAT_SCR;
1517 RegWidth = 2;
1518 return true;
1519 }
1520 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1521 Reg = AMDGPU::VCC;
1522 RegWidth = 2;
1523 return true;
1524 }
1525 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1526 Reg = AMDGPU::TBA;
1527 RegWidth = 2;
1528 return true;
1529 }
1530 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1531 Reg = AMDGPU::TMA;
1532 RegWidth = 2;
1533 return true;
1534 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001535 return false;
1536 case IS_VGPR:
1537 case IS_SGPR:
1538 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001539 if (Reg1 != Reg + RegWidth) {
1540 return false;
1541 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001542 RegWidth++;
1543 return true;
1544 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001545 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001546 }
1547}
1548
Eugene Zelenko66203762017-01-21 00:53:49 +00001549bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1550 unsigned &RegNum, unsigned &RegWidth,
1551 unsigned *DwordRegIndex) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001552 if (DwordRegIndex) { *DwordRegIndex = 0; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001553 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1554 if (getLexer().is(AsmToken::Identifier)) {
1555 StringRef RegName = Parser.getTok().getString();
1556 if ((Reg = getSpecialRegForName(RegName))) {
1557 Parser.Lex();
1558 RegKind = IS_SPECIAL;
1559 } else {
1560 unsigned RegNumIndex = 0;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001561 if (RegName[0] == 'v') {
1562 RegNumIndex = 1;
1563 RegKind = IS_VGPR;
1564 } else if (RegName[0] == 's') {
1565 RegNumIndex = 1;
1566 RegKind = IS_SGPR;
1567 } else if (RegName.startswith("ttmp")) {
1568 RegNumIndex = strlen("ttmp");
1569 RegKind = IS_TTMP;
1570 } else {
1571 return false;
1572 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001573 if (RegName.size() > RegNumIndex) {
1574 // Single 32-bit register: vXX.
Artem Tamazovf88397c2016-06-03 14:41:17 +00001575 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1576 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001577 Parser.Lex();
1578 RegWidth = 1;
1579 } else {
Artem Tamazov7da9b822016-05-27 12:50:13 +00001580 // Range of registers: v[XX:YY]. ":YY" is optional.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001581 Parser.Lex();
1582 int64_t RegLo, RegHi;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001583 if (getLexer().isNot(AsmToken::LBrac))
1584 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001585 Parser.Lex();
1586
Artem Tamazovf88397c2016-06-03 14:41:17 +00001587 if (getParser().parseAbsoluteExpression(RegLo))
1588 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001589
Artem Tamazov7da9b822016-05-27 12:50:13 +00001590 const bool isRBrace = getLexer().is(AsmToken::RBrac);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001591 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1592 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001593 Parser.Lex();
1594
Artem Tamazov7da9b822016-05-27 12:50:13 +00001595 if (isRBrace) {
1596 RegHi = RegLo;
1597 } else {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001598 if (getParser().parseAbsoluteExpression(RegHi))
1599 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001600
Artem Tamazovf88397c2016-06-03 14:41:17 +00001601 if (getLexer().isNot(AsmToken::RBrac))
1602 return false;
Artem Tamazov7da9b822016-05-27 12:50:13 +00001603 Parser.Lex();
1604 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001605 RegNum = (unsigned) RegLo;
1606 RegWidth = (RegHi - RegLo) + 1;
1607 }
1608 }
1609 } else if (getLexer().is(AsmToken::LBrac)) {
1610 // List of consecutive registers: [s0,s1,s2,s3]
1611 Parser.Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00001612 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
Artem Tamazovf88397c2016-06-03 14:41:17 +00001613 return false;
1614 if (RegWidth != 1)
1615 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001616 RegisterKind RegKind1;
1617 unsigned Reg1, RegNum1, RegWidth1;
1618 do {
1619 if (getLexer().is(AsmToken::Comma)) {
1620 Parser.Lex();
1621 } else if (getLexer().is(AsmToken::RBrac)) {
1622 Parser.Lex();
1623 break;
Artem Tamazova01cce82016-12-27 16:00:11 +00001624 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001625 if (RegWidth1 != 1) {
1626 return false;
1627 }
1628 if (RegKind1 != RegKind) {
1629 return false;
1630 }
1631 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1632 return false;
1633 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001634 } else {
1635 return false;
1636 }
1637 } while (true);
1638 } else {
1639 return false;
1640 }
1641 switch (RegKind) {
1642 case IS_SPECIAL:
1643 RegNum = 0;
1644 RegWidth = 1;
1645 break;
1646 case IS_VGPR:
1647 case IS_SGPR:
1648 case IS_TTMP:
1649 {
1650 unsigned Size = 1;
1651 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001652 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001653 Size = std::min(RegWidth, 4u);
1654 }
Artem Tamazovf88397c2016-06-03 14:41:17 +00001655 if (RegNum % Size != 0)
1656 return false;
Artem Tamazova01cce82016-12-27 16:00:11 +00001657 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001658 RegNum = RegNum / Size;
1659 int RCID = getRegClass(RegKind, RegWidth);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001660 if (RCID == -1)
1661 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001662 const MCRegisterClass RC = TRI->getRegClass(RCID);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001663 if (RegNum >= RC.getNumRegs())
1664 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001665 Reg = RC.getRegister(RegNum);
1666 break;
1667 }
1668
1669 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001670 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001671 }
1672
Artem Tamazovf88397c2016-06-03 14:41:17 +00001673 if (!subtargetHasRegister(*TRI, Reg))
1674 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001675 return true;
1676}
1677
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001678std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001679 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001680 SMLoc StartLoc = Tok.getLoc();
1681 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001682 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001683 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001684
Artem Tamazova01cce82016-12-27 16:00:11 +00001685 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001686 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001687 }
Artem Tamazova01cce82016-12-27 16:00:11 +00001688 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001689 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001690}
1691
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001692bool
1693AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1694 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1695 (getLexer().getKind() == AsmToken::Integer ||
1696 getLexer().getKind() == AsmToken::Real)) {
1697
1698 // This is a workaround for handling operands like these:
1699 // |1.0|
1700 // |-1|
1701 // This syntax is not compatible with syntax of standard
1702 // MC expressions (due to the trailing '|').
1703
1704 SMLoc EndLoc;
1705 const MCExpr *Expr;
1706
1707 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1708 return true;
1709 }
1710
1711 return !Expr->evaluateAsAbsolute(Val);
1712 }
1713
1714 return getParser().parseAbsoluteExpression(Val);
1715}
1716
Alex Bradbury58eba092016-11-01 16:32:05 +00001717OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001718AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001719 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001720 bool Minus = false;
1721 if (getLexer().getKind() == AsmToken::Minus) {
1722 Minus = true;
1723 Parser.Lex();
1724 }
1725
1726 SMLoc S = Parser.getTok().getLoc();
1727 switch(getLexer().getKind()) {
1728 case AsmToken::Integer: {
1729 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001730 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001731 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001732 if (Minus)
1733 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001734 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001735 return MatchOperand_Success;
1736 }
1737 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001738 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001739 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001740 return MatchOperand_ParseFail;
1741
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001742 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001743 if (Minus)
1744 F.changeSign();
1745 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001746 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001747 AMDGPUOperand::ImmTyNone, true));
1748 return MatchOperand_Success;
1749 }
1750 default:
1751 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1752 }
1753}
1754
Alex Bradbury58eba092016-11-01 16:32:05 +00001755OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001756AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001757 if (auto R = parseRegister()) {
1758 assert(R->isReg());
1759 R->Reg.IsForcedVOP3 = isForcedVOP3();
1760 Operands.push_back(std::move(R));
1761 return MatchOperand_Success;
1762 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001763 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001764}
1765
Alex Bradbury58eba092016-11-01 16:32:05 +00001766OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001767AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1768 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001769 if (res != MatchOperand_NoMatch) {
1770 return res;
1771 }
1772
1773 return parseReg(Operands);
1774}
1775
// Parse a register or immediate with optional floating-point input
// modifiers. Two spellings exist for each modifier:
//   Negate  - syntactic '-' prefix          Negate2 - 'neg(...)' form
//   Abs     - '|...|' bars                  Abs2    - 'abs(...)' form
// When AllowImm is false only registers are accepted.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Beside being contr-intuitive, such use of floating-point NEG modifier
    // results in different meaning of integer literals used with VOP1/2/C
    // and VOP3, for example:
    //    v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
    //    v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
    // Negative fp literals should be handled likewise for unifomtity
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  // 'neg(' function-style negation; mutually exclusive with a leading '-'.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // 'abs(' function-style absolute value.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // Opening '|' of the bar form; may not be mixed with abs(...).
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume the closing tokens of whichever modifier forms were opened and
  // record the corresponding modifier bits.
  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1884
Alex Bradbury58eba092016-11-01 16:32:05 +00001885OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00001886AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1887 bool AllowImm) {
Sam Kolton945231a2016-06-10 09:57:59 +00001888 bool Sext = false;
1889
Eugene Zelenko66203762017-01-21 00:53:49 +00001890 if (getLexer().getKind() == AsmToken::Identifier &&
1891 Parser.getTok().getString() == "sext") {
Sam Kolton945231a2016-06-10 09:57:59 +00001892 Parser.Lex();
1893 Sext = true;
1894 if (getLexer().isNot(AsmToken::LParen)) {
1895 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1896 return MatchOperand_ParseFail;
1897 }
1898 Parser.Lex();
1899 }
1900
Sam Kolton9772eb32017-01-11 11:46:30 +00001901 OperandMatchResultTy Res;
1902 if (AllowImm) {
1903 Res = parseRegOrImm(Operands);
1904 } else {
1905 Res = parseReg(Operands);
1906 }
Sam Kolton945231a2016-06-10 09:57:59 +00001907 if (Res != MatchOperand_Success) {
1908 return Res;
1909 }
1910
Matt Arsenaultb55f6202016-12-03 18:22:49 +00001911 AMDGPUOperand::Modifiers Mods;
Sam Kolton945231a2016-06-10 09:57:59 +00001912 if (Sext) {
1913 if (getLexer().isNot(AsmToken::RParen)) {
1914 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1915 return MatchOperand_ParseFail;
1916 }
1917 Parser.Lex();
1918 Mods.Sext = true;
1919 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00001920
Sam Kolton945231a2016-06-10 09:57:59 +00001921 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00001922 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00001923 Op.setModifiers(Mods);
1924 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001925
Sam Kolton945231a2016-06-10 09:57:59 +00001926 return MatchOperand_Success;
1927}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001928
Sam Kolton9772eb32017-01-11 11:46:30 +00001929OperandMatchResultTy
1930AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
1931 return parseRegOrImmWithFPInputMods(Operands, false);
1932}
1933
// Parse a register (no immediates) with an optional sext(...) modifier.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
1938
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001939OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1940 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1941 if (Reg) {
1942 Operands.push_back(std::move(Reg));
1943 return MatchOperand_Success;
1944 }
1945
1946 const AsmToken &Tok = Parser.getTok();
1947 if (Tok.getString() == "off") {
1948 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1949 AMDGPUOperand::ImmTyOff, false));
1950 Parser.Lex();
1951 return MatchOperand_Success;
1952 }
1953
1954 return MatchOperand_NoMatch;
1955}
1956
Tom Stellard45bb48e2015-06-13 03:28:10 +00001957unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001958 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1959
1960 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00001961 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
1962 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
1963 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00001964 return Match_InvalidOperand;
1965
Tom Stellard88e0b252015-10-06 15:57:53 +00001966 if ((TSFlags & SIInstrFlags::VOP3) &&
1967 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
1968 getForcedEncodingSize() != 64)
1969 return Match_PreferE32;
1970
Sam Koltona568e3d2016-12-22 12:57:41 +00001971 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
1972 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00001973 // v_mac_f32/16 allow only dst_sel == DWORD;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001974 auto OpNum =
1975 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
Sam Koltona3ec5c12016-10-07 14:46:06 +00001976 const auto &Op = Inst.getOperand(OpNum);
1977 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
1978 return Match_InvalidOperand;
1979 }
1980 }
1981
Matt Arsenaultfd023142017-06-12 15:55:58 +00001982 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
1983 // FIXME: Produces error without correct column reported.
1984 auto OpNum =
1985 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
1986 const auto &Op = Inst.getOperand(OpNum);
1987 if (Op.getImm() != 0)
1988 return Match_InvalidOperand;
1989 }
1990
Tom Stellard45bb48e2015-06-13 03:28:10 +00001991 return Match_Success;
1992}
1993
Matt Arsenault5f45e782017-01-09 18:44:11 +00001994// What asm variants we should check
1995ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
1996 if (getForcedEncodingSize() == 32) {
1997 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
1998 return makeArrayRef(Variants);
1999 }
2000
2001 if (isForcedVOP3()) {
2002 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2003 return makeArrayRef(Variants);
2004 }
2005
2006 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002007 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2008 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002009 return makeArrayRef(Variants);
2010 }
2011
2012 if (isForcedDPP()) {
2013 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2014 return makeArrayRef(Variants);
2015 }
2016
2017 static const unsigned Variants[] = {
2018 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002019 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002020 };
2021
2022 return makeArrayRef(Variants);
2023}
2024
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002025unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2026 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2027 const unsigned Num = Desc.getNumImplicitUses();
2028 for (unsigned i = 0; i < Num; ++i) {
2029 unsigned Reg = Desc.ImplicitUses[i];
2030 switch (Reg) {
2031 case AMDGPU::FLAT_SCR:
2032 case AMDGPU::VCC:
2033 case AMDGPU::M0:
2034 return Reg;
2035 default:
2036 break;
2037 }
2038 }
2039 return AMDGPU::NoRegister;
2040}
2041
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 support no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
//
// Returns true if the immediate in operand OpIdx fits the inline-constant
// encoding for that operand's expected size (8/4/2 bytes). Inline
// constants do not consume a constant-bus slot.
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  // Only SI src operands can carry inline constants at all.
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    // Packed 16-bit operands (v2i16/v2f16) use their own inlining rule;
    // plain 16-bit operands use the scalar one.
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
2077
2078bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2079 const MCOperand &MO = Inst.getOperand(OpIdx);
2080 if (MO.isImm()) {
2081 return !isInlineConstant(Inst, OpIdx);
2082 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002083 return !MO.isReg() ||
2084 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002085}
2086
// Enforce the hardware restriction that a VALU instruction may read at
// most one value over the constant bus (SGPR, literal, or an implicit
// SGPR such as VCC/M0/FLAT_SCR). Returns false when Inst violates it.
bool AMDGPUAsmParser::validateOperandLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VALU encodings are subject to the constant-bus limit.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {

    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // An implicit read of FLAT_SCR/VCC/M0 also consumes a bus slot.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Re-reading the same SGPR costs only one slot, so compare
          // against the last SGPR seen before counting.
          // Pairs of registers with a partial intersections like these
          // s0, s[0:1]
          // flat_scratch_lo, flat_scratch
          // flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2140
// Top-level match-and-emit: try every encoding variant allowed for the
// current mnemonic, keep the most specific failure diagnostic, and on a
// successful match run constant-bus validation before emitting.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // First variant that matches wins; later variants are not tried.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // Syntactic match still has to satisfy the constant-bus limit.
    if (!validateOperandLimitations(Inst)) {
      return Error(IDLoc,
                   "invalid operand (violates constant bus restrictions)");
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when its index is known.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2206
Artem Tamazov25478d82016-12-29 15:41:52 +00002207bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2208 int64_t Tmp = -1;
2209 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2210 return true;
2211 }
2212 if (getParser().parseAbsoluteExpression(Tmp)) {
2213 return true;
2214 }
2215 Ret = static_cast<uint32_t>(Tmp);
2216 return false;
2217}
2218
// Parse "<major>, <minor>" — the common prefix of the HSA code-object
// version directives. Returns true (after emitting a diagnostic) on error.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
2233
2234bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00002235 uint32_t Major;
2236 uint32_t Minor;
2237
2238 if (ParseDirectiveMajorMinor(Major, Minor))
2239 return true;
2240
2241 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2242 return false;
2243}
2244
// Parse .hsa_code_object_isa [<major>, <minor>, <stepping>, "<vendor>",
// "<arch>"]. With no arguments, the ISA version of the targeted GPU is
// emitted with vendor "AMD" and arch "AMDGPU".
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaInfo::IsaVersion ISA =
        AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  // getStringContents() yields the string without its quotes.
  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
2297
// Collect the raw YAML between .amdgpu_code_object_metadata and
// .end_amdgpu_code_object_metadata and hand it to the target streamer.
// Space-skipping is disabled so the YAML's significant indentation is
// preserved verbatim.
bool AMDGPUAsmParser::ParseDirectiveCodeObjectMetadata() {
  std::string YamlString;
  raw_string_ostream YamlStream(YamlString);

  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Copy leading whitespace through unchanged.
    while (getLexer().is(AsmToken::Space)) {
      YamlStream << getLexer().getTok().getString();
      Lex();
    }

    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AMDGPU::CodeObject::MetadataAssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Append the rest of the line plus a statement separator so line
    // structure survives into the YAML string.
    YamlStream << Parser.parseStringToEndOfStatement()
               << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal lexing before reporting errors or returning.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(
        "expected directive .end_amdgpu_code_object_metadata not found");
  }

  YamlStream.flush();

  if (!getTargetStreamer().EmitCodeObjectMetadata(YamlString))
    return Error(getParser().getTok().getLoc(), "invalid code object metadata");

  return false;
}
2340
Tom Stellardff7416b2015-06-26 21:58:31 +00002341bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2342 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002343 SmallString<40> ErrStr;
2344 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00002345 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002346 return TokError(Err.str());
2347 }
Tom Stellardff7416b2015-06-26 21:58:31 +00002348 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00002349 return false;
2350}
2351
// Parse a .amd_kernel_code_t ... .end_amd_kernel_code_t block: start from
// the target's default header and overwrite one named field per statement.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());

  while (true) {
    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  // Emit only after the entire block parsed cleanly.
  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
2379
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002380bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2381 if (getLexer().isNot(AsmToken::Identifier))
2382 return TokError("expected symbol name");
2383
2384 StringRef KernelName = Parser.getTok().getString();
2385
2386 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2387 ELF::STT_AMDGPU_HSA_KERNEL);
2388 Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00002389 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002390 return false;
2391}
2392
// Dispatch AMDGPU-specific assembler directives to their handlers.
// Directives not recognized here fall through returning true.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == AMDGPU::CodeObject::MetadataAssemblerDirectiveBegin)
    return ParseDirectiveCodeObjectMetadata();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  return true;
}
2413
Matt Arsenault68802d32015-11-05 03:11:27 +00002414bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2415 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00002416 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00002417 return true;
2418
Matt Arsenault3b159672015-12-01 20:31:08 +00002419 if (isSI()) {
2420 // No flat_scr
2421 switch (RegNo) {
2422 case AMDGPU::FLAT_SCR:
2423 case AMDGPU::FLAT_SCR_LO:
2424 case AMDGPU::FLAT_SCR_HI:
2425 return false;
2426 default:
2427 return true;
2428 }
2429 }
2430
Matt Arsenault68802d32015-11-05 03:11:27 +00002431 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2432 // SI/CI have.
2433 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2434 R.isValid(); ++R) {
2435 if (*R == RegNo)
2436 return false;
2437 }
2438
2439 return true;
2440}
2441
// Parse one instruction operand: first the auto-generated custom parsers,
// then register/immediate, then a generic expression or raw token.
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treate the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to recording the raw token.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
2482
Sam Kolton05ef1c92016-06-03 10:27:37 +00002483StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2484 // Clear any forced encodings from the previous instruction.
2485 setForcedEncodingSize(0);
2486 setForcedDPP(false);
2487 setForcedSDWA(false);
2488
2489 if (Name.endswith("_e64")) {
2490 setForcedEncodingSize(64);
2491 return Name.substr(0, Name.size() - 4);
2492 } else if (Name.endswith("_e32")) {
2493 setForcedEncodingSize(32);
2494 return Name.substr(0, Name.size() - 4);
2495 } else if (Name.endswith("_dpp")) {
2496 setForcedDPP(true);
2497 return Name.substr(0, Name.size() - 4);
2498 } else if (Name.endswith("_sdwa")) {
2499 setForcedSDWA(true);
2500 return Name.substr(0, Name.size() - 5);
2501 }
2502 return Name;
2503}
2504
// Parse a full instruction: mnemonic (suffix stripped) followed by
// comma/space-separated operands. On an operand error, consume the rest of
// the statement so the parser can resynchronize at the next line.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Skip to end of statement to recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
2538
2539//===----------------------------------------------------------------------===//
2540// Utility functions
2541//===----------------------------------------------------------------------===//
2542
Alex Bradbury58eba092016-11-01 16:32:05 +00002543OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002544AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002545 switch(getLexer().getKind()) {
2546 default: return MatchOperand_NoMatch;
2547 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002548 StringRef Name = Parser.getTok().getString();
2549 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002550 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002551 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002552
2553 Parser.Lex();
2554 if (getLexer().isNot(AsmToken::Colon))
2555 return MatchOperand_ParseFail;
2556
2557 Parser.Lex();
Matt Arsenault9698f1c2017-06-20 19:54:14 +00002558
2559 bool IsMinus = false;
2560 if (getLexer().getKind() == AsmToken::Minus) {
2561 Parser.Lex();
2562 IsMinus = true;
2563 }
2564
Tom Stellard45bb48e2015-06-13 03:28:10 +00002565 if (getLexer().isNot(AsmToken::Integer))
2566 return MatchOperand_ParseFail;
2567
2568 if (getParser().parseAbsoluteExpression(Int))
2569 return MatchOperand_ParseFail;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00002570
2571 if (IsMinus)
2572 Int = -Int;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002573 break;
2574 }
2575 }
2576 return MatchOperand_Success;
2577}
2578
Alex Bradbury58eba092016-11-01 16:32:05 +00002579OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002580AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002581 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002582 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002583 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002584 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002585
Alex Bradbury58eba092016-11-01 16:32:05 +00002586 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002587 if (Res != MatchOperand_Success)
2588 return Res;
2589
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002590 if (ConvertResult && !ConvertResult(Value)) {
2591 return MatchOperand_ParseFail;
2592 }
2593
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002594 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002595 return MatchOperand_Success;
2596}
2597
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00002598OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
2599 const char *Prefix,
2600 OperandVector &Operands,
2601 AMDGPUOperand::ImmTy ImmTy,
2602 bool (*ConvertResult)(int64_t&)) {
2603 StringRef Name = Parser.getTok().getString();
2604 if (!Name.equals(Prefix))
2605 return MatchOperand_NoMatch;
2606
2607 Parser.Lex();
2608 if (getLexer().isNot(AsmToken::Colon))
2609 return MatchOperand_ParseFail;
2610
2611 Parser.Lex();
2612 if (getLexer().isNot(AsmToken::LBrac))
2613 return MatchOperand_ParseFail;
2614 Parser.Lex();
2615
2616 unsigned Val = 0;
2617 SMLoc S = Parser.getTok().getLoc();
2618
2619 // FIXME: How to verify the number of elements matches the number of src
2620 // operands?
2621 for (int I = 0; I < 3; ++I) {
2622 if (I != 0) {
2623 if (getLexer().is(AsmToken::RBrac))
2624 break;
2625
2626 if (getLexer().isNot(AsmToken::Comma))
2627 return MatchOperand_ParseFail;
2628 Parser.Lex();
2629 }
2630
2631 if (getLexer().isNot(AsmToken::Integer))
2632 return MatchOperand_ParseFail;
2633
2634 int64_t Op;
2635 if (getParser().parseAbsoluteExpression(Op))
2636 return MatchOperand_ParseFail;
2637
2638 if (Op != 0 && Op != 1)
2639 return MatchOperand_ParseFail;
2640 Val |= (Op << I);
2641 }
2642
2643 Parser.Lex();
2644 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
2645 return MatchOperand_Success;
2646}
2647
Alex Bradbury58eba092016-11-01 16:32:05 +00002648OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002649AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002650 AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002651 int64_t Bit = 0;
2652 SMLoc S = Parser.getTok().getLoc();
2653
2654 // We are at the end of the statement, and this is a default argument, so
2655 // use a default value.
2656 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2657 switch(getLexer().getKind()) {
2658 case AsmToken::Identifier: {
2659 StringRef Tok = Parser.getTok().getString();
2660 if (Tok == Name) {
2661 Bit = 1;
2662 Parser.Lex();
2663 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
2664 Bit = 0;
2665 Parser.Lex();
2666 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00002667 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002668 }
2669 break;
2670 }
2671 default:
2672 return MatchOperand_NoMatch;
2673 }
2674 }
2675
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002676 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002677 return MatchOperand_Success;
2678}
2679
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00002680static void addOptionalImmOperand(
2681 MCInst& Inst, const OperandVector& Operands,
2682 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
2683 AMDGPUOperand::ImmTy ImmT,
2684 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002685 auto i = OptionalIdx.find(ImmT);
2686 if (i != OptionalIdx.end()) {
2687 unsigned Idx = i->second;
2688 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2689 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00002690 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002691 }
2692}
2693
Alex Bradbury58eba092016-11-01 16:32:05 +00002694OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002695AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002696 if (getLexer().isNot(AsmToken::Identifier)) {
2697 return MatchOperand_NoMatch;
2698 }
2699 StringRef Tok = Parser.getTok().getString();
2700 if (Tok != Prefix) {
2701 return MatchOperand_NoMatch;
2702 }
2703
2704 Parser.Lex();
2705 if (getLexer().isNot(AsmToken::Colon)) {
2706 return MatchOperand_ParseFail;
2707 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002708
Sam Kolton3025e7f2016-04-26 13:33:56 +00002709 Parser.Lex();
2710 if (getLexer().isNot(AsmToken::Identifier)) {
2711 return MatchOperand_ParseFail;
2712 }
2713
2714 Value = Parser.getTok().getString();
2715 return MatchOperand_Success;
2716}
2717
Tom Stellard45bb48e2015-06-13 03:28:10 +00002718//===----------------------------------------------------------------------===//
2719// ds
2720//===----------------------------------------------------------------------===//
2721
Tom Stellard45bb48e2015-06-13 03:28:10 +00002722void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2723 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002724 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002725
2726 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2727 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2728
2729 // Add the register arguments
2730 if (Op.isReg()) {
2731 Op.addRegOperands(Inst, 1);
2732 continue;
2733 }
2734
2735 // Handle optional arguments
2736 OptionalIdx[Op.getImmTy()] = i;
2737 }
2738
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002739 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2740 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002741 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002742
Tom Stellard45bb48e2015-06-13 03:28:10 +00002743 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2744}
2745
// Convert parsed DS operands into MCInst operands. Registers are added
// positionally; immediates are matched by type. A "gds" token (or
// IsGdsHardcoded from the caller) suppresses the separate gds operand.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 carries a special swizzle immediate rather than a
  // plain offset.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
2780
// Convert parsed exp-instruction operands into an MCInst. Tracks where
// each of the four source slots landed (OperandIdx), rewrites the slots
// for the compressed (compr) form, and derives the enable mask from which
// slots are not 'off'.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned OperandIdx[4]; // MCInst operand index of each source slot
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // An 'off' placeholder becomes NoRegister and stays disabled in the
    // enable mask.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4); // exp always supplies exactly four source slots

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // compr form: slot 2's operand moves down into slot 1; the upper two
    // slots are cleared.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Each used slot enables two mask bits in compr mode, one otherwise.
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002841
2842//===----------------------------------------------------------------------===//
2843// s_waitcnt
2844//===----------------------------------------------------------------------===//
2845
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00002846static bool
2847encodeCnt(
2848 const AMDGPU::IsaInfo::IsaVersion ISA,
2849 int64_t &IntVal,
2850 int64_t CntVal,
2851 bool Saturate,
2852 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
2853 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
2854{
2855 bool Failed = false;
2856
2857 IntVal = encode(ISA, IntVal, CntVal);
2858 if (CntVal != decode(ISA, IntVal)) {
2859 if (Saturate) {
2860 IntVal = encode(ISA, IntVal, -1);
2861 } else {
2862 Failed = true;
2863 }
2864 }
2865 return Failed;
2866}
2867
// Parses one counter term of an s_waitcnt operand, e.g. "vmcnt(0)" or
// "lgkmcnt_sat(1)", and folds its encoding into IntVal (which holds the
// partially-built waitcnt mask). Returns true on parse/encoding failure.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  // Remember where the value starts so the overflow diagnostic points at it.
  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());

  // An unrecognized counter name leaves Failed set and is diagnosed below.
  bool Failed = true;
  // A "_sat" suffix requests clamping to the field maximum instead of failing.
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  // Counter terms may be separated by '&' or ','; consume the separator only
  // when another counter name follows it.
  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
2917
Alex Bradbury58eba092016-11-01 16:32:05 +00002918OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002919AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00002920 AMDGPU::IsaInfo::IsaVersion ISA =
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00002921 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00002922 int64_t Waitcnt = getWaitcntBitMask(ISA);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002923 SMLoc S = Parser.getTok().getLoc();
2924
2925 switch(getLexer().getKind()) {
2926 default: return MatchOperand_ParseFail;
2927 case AsmToken::Integer:
2928 // The operand can be an integer value.
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002929 if (getParser().parseAbsoluteExpression(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00002930 return MatchOperand_ParseFail;
2931 break;
2932
2933 case AsmToken::Identifier:
2934 do {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002935 if (parseCnt(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00002936 return MatchOperand_ParseFail;
2937 } while(getLexer().isNot(AsmToken::EndOfStatement));
2938 break;
2939 }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002940 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002941 return MatchOperand_Success;
2942}
2943
// Parses the symbolic form "hwreg(<id>[, <offset>, <width>])" of an
// s_getreg/s_setreg operand. The register id may be a symbolic name (looked
// up in IdSymbolic) or an integer. Offset and Width are only overwritten when
// the optional parameters are present; callers pass in the defaults.
// Returns true on a syntax error, false on success.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name; an unknown name leaves Id == ID_UNKNOWN_ for
    // the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: hwreg(<id>) with no offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
3005
// Parses an s_getreg/s_setreg "hwreg" operand. Accepts either a raw 16-bit
// immediate or the symbolic hwreg(...) construct, validates the id/offset/
// width subfields, and pushes a single packed immediate operand.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
      OperandInfoTy HwReg(ID_UNKNOWN_);
      int64_t Offset = OFFSET_DEFAULT_;
      int64_t Width = WIDTH_M1_DEFAULT_ + 1;
      if (parseHwregConstruct(HwReg, Offset, Width))
        return MatchOperand_ParseFail;
      // Each range check below emits a diagnostic but still builds the packed
      // immediate, matching the error-recovery policy above.
      if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
        if (HwReg.IsSymbolic)
          Error(S, "invalid symbolic name of hardware register");
        else
          Error(S, "invalid code of hardware register: only 6-bit values are legal");
      }
      if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
        Error(S, "invalid bit offset: only 5-bit values are legal");
      if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
        Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
      // Pack id | offset | (width-1) into the 16-bit encoding.
      Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
3048
// An s_waitcnt operand is stored as a plain immediate (no dedicated ImmTy);
// any immediate qualifies.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
3052
// True iff this operand is an immediate tagged as a hwreg encoding.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
3056
// Parses the symbolic form "sendmsg(<msg>[, <op>[, <stream>]])" of an
// s_sendmsg operand. Msg and Operation may each be given symbolically or as
// integers; StreamId is optional and only meaningful for GS messages.
// Output parameters are filled in as the construct is consumed; validation of
// the resulting values is left to the caller (parseSendMsgOp).
// Returns true on a syntax error, false on success.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name. The id space has gaps, so only the defined ids
    // are compared against the token.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages other than GS/GS_DONE/SYSMSG take no operation argument.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // The operation name table depends on the message family.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
3152
Matt Arsenault0e8a2992016-12-15 20:40:20 +00003153OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3154 if (getLexer().getKind() != AsmToken::Identifier)
3155 return MatchOperand_NoMatch;
3156
3157 StringRef Str = Parser.getTok().getString();
3158 int Slot = StringSwitch<int>(Str)
3159 .Case("p10", 0)
3160 .Case("p20", 1)
3161 .Case("p0", 2)
3162 .Default(-1);
3163
3164 SMLoc S = Parser.getTok().getLoc();
3165 if (Slot == -1)
3166 return MatchOperand_ParseFail;
3167
3168 Parser.Lex();
3169 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3170 AMDGPUOperand::ImmTyInterpSlot));
3171 return MatchOperand_Success;
3172}
3173
// Parses the "attr<N>.<chan>" operand of v_interp_* instructions, producing
// two immediates: the attribute number (0-63) and the channel (.x/.y/.z/.w
// encoded as 0-3).
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the trailing two characters, e.g. ".x".
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the "attr" prefix and the channel suffix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  // Point the channel operand's location at the ".x" part of the token.
  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
3213
// Emits the common diagnostic for an out-of-range or unknown exp target,
// anchored at the current token.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
3217
3218OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3219 uint8_t &Val) {
3220 if (Str == "null") {
3221 Val = 9;
3222 return MatchOperand_Success;
3223 }
3224
3225 if (Str.startswith("mrt")) {
3226 Str = Str.drop_front(3);
3227 if (Str == "z") { // == mrtz
3228 Val = 8;
3229 return MatchOperand_Success;
3230 }
3231
3232 if (Str.getAsInteger(10, Val))
3233 return MatchOperand_ParseFail;
3234
3235 if (Val > 7)
3236 errorExpTgt();
3237
3238 return MatchOperand_Success;
3239 }
3240
3241 if (Str.startswith("pos")) {
3242 Str = Str.drop_front(3);
3243 if (Str.getAsInteger(10, Val))
3244 return MatchOperand_ParseFail;
3245
3246 if (Val > 3)
3247 errorExpTgt();
3248
3249 Val += 12;
3250 return MatchOperand_Success;
3251 }
3252
3253 if (Str.startswith("param")) {
3254 Str = Str.drop_front(5);
3255 if (Str.getAsInteger(10, Val))
3256 return MatchOperand_ParseFail;
3257
3258 if (Val >= 32)
3259 errorExpTgt();
3260
3261 Val += 32;
3262 return MatchOperand_Success;
3263 }
3264
3265 if (Str.startswith("invalid_target_")) {
3266 Str = Str.drop_front(15);
3267 if (Str.getAsInteger(10, Val))
3268 return MatchOperand_ParseFail;
3269
3270 errorExpTgt();
3271 return MatchOperand_Success;
3272 }
3273
3274 return MatchOperand_NoMatch;
3275}
3276
3277OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3278 uint8_t Val;
3279 StringRef Str = Parser.getTok().getString();
3280
3281 auto Res = parseExpTgtImpl(Str, Val);
3282 if (Res != MatchOperand_Success)
3283 return Res;
3284
3285 SMLoc S = Parser.getTok().getLoc();
3286 Parser.Lex();
3287
3288 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3289 AMDGPUOperand::ImmTyExpTgt));
3290 return MatchOperand_Success;
3291}
3292
// Parses an s_sendmsg operand: either a raw 16-bit immediate or the symbolic
// sendmsg(...) construct. Validates the message id, operation id and stream
// id, then packs them into a single immediate. Validation failures emit a
// diagnostic but still produce an operand (see the comment below).
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    // The do/while(false) lets validation bail out early with `break` while
    // still falling through to operand creation below.
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
3371
// True iff this operand is an immediate tagged as a sendmsg encoding.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
3375
Tom Stellard45bb48e2015-06-13 03:28:10 +00003376//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00003377// parser helpers
3378//===----------------------------------------------------------------------===//
3379
3380bool
3381AMDGPUAsmParser::trySkipId(const StringRef Id) {
3382 if (getLexer().getKind() == AsmToken::Identifier &&
3383 Parser.getTok().getString() == Id) {
3384 Parser.Lex();
3385 return true;
3386 }
3387 return false;
3388}
3389
3390bool
3391AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3392 if (getLexer().getKind() == Kind) {
3393 Parser.Lex();
3394 return true;
3395 }
3396 return false;
3397}
3398
3399bool
3400AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3401 const StringRef ErrMsg) {
3402 if (!trySkipToken(Kind)) {
3403 Error(Parser.getTok().getLoc(), ErrMsg);
3404 return false;
3405 }
3406 return true;
3407}
3408
3409bool
3410AMDGPUAsmParser::parseExpr(int64_t &Imm) {
3411 return !getParser().parseAbsoluteExpression(Imm);
3412}
3413
3414bool
3415AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3416 SMLoc S = Parser.getTok().getLoc();
3417 if (getLexer().getKind() == AsmToken::String) {
3418 Val = Parser.getTok().getStringContents();
3419 Parser.Lex();
3420 return true;
3421 } else {
3422 Error(S, ErrMsg);
3423 return false;
3424 }
3425}
3426
3427//===----------------------------------------------------------------------===//
3428// swizzle
3429//===----------------------------------------------------------------------===//
3430
3431LLVM_READNONE
3432static unsigned
3433encodeBitmaskPerm(const unsigned AndMask,
3434 const unsigned OrMask,
3435 const unsigned XorMask) {
3436 using namespace llvm::AMDGPU::Swizzle;
3437
3438 return BITMASK_PERM_ENC |
3439 (AndMask << BITMASK_AND_SHIFT) |
3440 (OrMask << BITMASK_OR_SHIFT) |
3441 (XorMask << BITMASK_XOR_SHIFT);
3442}
3443
3444bool
3445AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3446 const unsigned MinVal,
3447 const unsigned MaxVal,
3448 const StringRef ErrMsg) {
3449 for (unsigned i = 0; i < OpNum; ++i) {
3450 if (!skipToken(AsmToken::Comma, "expected a comma")){
3451 return false;
3452 }
3453 SMLoc ExprLoc = Parser.getTok().getLoc();
3454 if (!parseExpr(Op[i])) {
3455 return false;
3456 }
3457 if (Op[i] < MinVal || Op[i] > MaxVal) {
3458 Error(ExprLoc, ErrMsg);
3459 return false;
3460 }
3461 }
3462
3463 return true;
3464}
3465
3466bool
3467AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3468 using namespace llvm::AMDGPU::Swizzle;
3469
3470 int64_t Lane[LANE_NUM];
3471 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3472 "expected a 2-bit lane id")) {
3473 Imm = QUAD_PERM_ENC;
3474 for (auto i = 0; i < LANE_NUM; ++i) {
3475 Imm |= Lane[i] << (LANE_SHIFT * i);
3476 }
3477 return true;
3478 }
3479 return false;
3480}
3481
3482bool
3483AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
3484 using namespace llvm::AMDGPU::Swizzle;
3485
3486 SMLoc S = Parser.getTok().getLoc();
3487 int64_t GroupSize;
3488 int64_t LaneIdx;
3489
3490 if (!parseSwizzleOperands(1, &GroupSize,
3491 2, 32,
3492 "group size must be in the interval [2,32]")) {
3493 return false;
3494 }
3495 if (!isPowerOf2_64(GroupSize)) {
3496 Error(S, "group size must be a power of two");
3497 return false;
3498 }
3499 if (parseSwizzleOperands(1, &LaneIdx,
3500 0, GroupSize - 1,
3501 "lane id must be in the interval [0,group size - 1]")) {
3502 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
3503 return true;
3504 }
3505 return false;
3506}
3507
3508bool
3509AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3510 using namespace llvm::AMDGPU::Swizzle;
3511
3512 SMLoc S = Parser.getTok().getLoc();
3513 int64_t GroupSize;
3514
3515 if (!parseSwizzleOperands(1, &GroupSize,
3516 2, 32, "group size must be in the interval [2,32]")) {
3517 return false;
3518 }
3519 if (!isPowerOf2_64(GroupSize)) {
3520 Error(S, "group size must be a power of two");
3521 return false;
3522 }
3523
3524 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3525 return true;
3526}
3527
3528bool
3529AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3530 using namespace llvm::AMDGPU::Swizzle;
3531
3532 SMLoc S = Parser.getTok().getLoc();
3533 int64_t GroupSize;
3534
3535 if (!parseSwizzleOperands(1, &GroupSize,
3536 1, 16, "group size must be in the interval [1,16]")) {
3537 return false;
3538 }
3539 if (!isPowerOf2_64(GroupSize)) {
3540 Error(S, "group size must be a power of two");
3541 return false;
3542 }
3543
3544 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3545 return true;
3546}
3547
3548bool
3549AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
3550 using namespace llvm::AMDGPU::Swizzle;
3551
3552 if (!skipToken(AsmToken::Comma, "expected a comma")) {
3553 return false;
3554 }
3555
3556 StringRef Ctl;
3557 SMLoc StrLoc = Parser.getTok().getLoc();
3558 if (!parseString(Ctl)) {
3559 return false;
3560 }
3561 if (Ctl.size() != BITMASK_WIDTH) {
3562 Error(StrLoc, "expected a 5-character mask");
3563 return false;
3564 }
3565
3566 unsigned AndMask = 0;
3567 unsigned OrMask = 0;
3568 unsigned XorMask = 0;
3569
3570 for (size_t i = 0; i < Ctl.size(); ++i) {
3571 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
3572 switch(Ctl[i]) {
3573 default:
3574 Error(StrLoc, "invalid mask");
3575 return false;
3576 case '0':
3577 break;
3578 case '1':
3579 OrMask |= Mask;
3580 break;
3581 case 'p':
3582 AndMask |= Mask;
3583 break;
3584 case 'i':
3585 AndMask |= Mask;
3586 XorMask |= Mask;
3587 break;
3588 }
3589 }
3590
3591 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
3592 return true;
3593}
3594
3595bool
3596AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
3597
3598 SMLoc OffsetLoc = Parser.getTok().getLoc();
3599
3600 if (!parseExpr(Imm)) {
3601 return false;
3602 }
3603 if (!isUInt<16>(Imm)) {
3604 Error(OffsetLoc, "expected a 16-bit offset");
3605 return false;
3606 }
3607 return true;
3608}
3609
3610bool
3611AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
3612 using namespace llvm::AMDGPU::Swizzle;
3613
3614 if (skipToken(AsmToken::LParen, "expected a left parentheses")) {
3615
3616 SMLoc ModeLoc = Parser.getTok().getLoc();
3617 bool Ok = false;
3618
3619 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
3620 Ok = parseSwizzleQuadPerm(Imm);
3621 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
3622 Ok = parseSwizzleBitmaskPerm(Imm);
3623 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
3624 Ok = parseSwizzleBroadcast(Imm);
3625 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
3626 Ok = parseSwizzleSwap(Imm);
3627 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
3628 Ok = parseSwizzleReverse(Imm);
3629 } else {
3630 Error(ModeLoc, "expected a swizzle mode");
3631 }
3632
3633 return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
3634 }
3635
3636 return false;
3637}
3638
3639OperandMatchResultTy
3640AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
3641 SMLoc S = Parser.getTok().getLoc();
3642 int64_t Imm = 0;
3643
3644 if (trySkipId("offset")) {
3645
3646 bool Ok = false;
3647 if (skipToken(AsmToken::Colon, "expected a colon")) {
3648 if (trySkipId("swizzle")) {
3649 Ok = parseSwizzleMacro(Imm);
3650 } else {
3651 Ok = parseSwizzleOffset(Imm);
3652 }
3653 }
3654
3655 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
3656
3657 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
3658 } else {
3659 return MatchOperand_NoMatch;
3660 }
3661}
3662
// True iff this operand is an immediate tagged as a ds_swizzle offset.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
3667
3668//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003669// sopp branch targets
3670//===----------------------------------------------------------------------===//
3671
Alex Bradbury58eba092016-11-01 16:32:05 +00003672OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003673AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
3674 SMLoc S = Parser.getTok().getLoc();
3675
3676 switch (getLexer().getKind()) {
3677 default: return MatchOperand_ParseFail;
3678 case AsmToken::Integer: {
3679 int64_t Imm;
3680 if (getParser().parseAbsoluteExpression(Imm))
3681 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003682 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003683 return MatchOperand_Success;
3684 }
3685
3686 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003687 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00003688 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
3689 Parser.getTok().getString()), getContext()), S));
3690 Parser.Lex();
3691 return MatchOperand_Success;
3692 }
3693}
3694
3695//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003696// mubuf
3697//===----------------------------------------------------------------------===//
3698
// Default operand for an omitted glc modifier: immediate 0, no location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
3702
// Default operand for an omitted slc modifier: immediate 0, no location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
3706
// Default operand for an omitted tfe modifier: immediate 0, no location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
3710
// Converts parsed MUBUF operands into MCInst operands. Registers and the
// (immediate) soffset are appended in parse order; optional modifiers are
// recorded by ImmTy and appended afterwards in the fixed order the
// instruction definition expects. For atomics with a returned value the
// input data register is duplicated as the destination.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // IsAtomicReturn implies IsAtomic.
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
3756
3757//===----------------------------------------------------------------------===//
3758// mimg
3759//===----------------------------------------------------------------------===//
3760
// Converts parsed MIMG operands into MCInst operands: result definitions
// first, then register/immediate sources in parse order, then the optional
// modifiers in the fixed order the instruction definition expects.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // The first operands are the instruction's definitions (results).
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember the position of each optional modifier for the fixed-order
      // emission below.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
3793
// Convert parsed MIMG atomic operands into an MCInst. Same layout as
// cvtMIMG(), except the operand at index I is additionally emitted once as a
// register before the main loop (which emits it again), per the original
// "src, same as dst" fixup for the tied data operand.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1; // Operands[0] is the instruction mnemonic.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember parse position; emitted in encoding order below.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
3829
// Factories for the default (0-valued) MIMG modifier operands, used by the
// matcher when the corresponding modifier is omitted from the assembly.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
3849
Tom Stellard45bb48e2015-06-13 03:28:10 +00003850//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00003851// smrd
3852//===----------------------------------------------------------------------===//
3853
// Predicates classifying an operand as an SMRD offset of a given width:
// an 8-bit immediate, a 20-bit immediate, or a 32-bit literal.
bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
3867
// Factories for default (0-valued) offset operands of the various SMRD /
// flat-scratch offset flavors, used when the assembly omits "offset:".
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
3887
Tom Stellard217361c2015-08-06 19:28:38 +00003888//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003889// vop3
3890//===----------------------------------------------------------------------===//
3891
// Convert a parsed "mul:N" value into its omod encoding:
// *1 -> 0, *2 -> 1, *4 -> 2. Returns false for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
3899
// Convert a parsed "div:N" value into its omod encoding:
// /1 -> 0, /2 -> 3. Returns false for any other divisor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
3913
// Convert a parsed "bound_ctrl:N" value into its DPP encoding:
// 0 -> 1, -1 -> 0. Any other value is rejected.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  if (BoundCtrl != 0 && BoundCtrl != -1)
    return false;
  BoundCtrl = (BoundCtrl == 0) ? 1 : 0;
  return true;
}
3927
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003928// Note: the order in this table matches the order of operands in AsmString.
// Table of all optional operands the parser recognizes.
// Fields: {Name, Type, IsBit, ConvertResult}. IsBit entries are parsed as
// bare flag tokens (e.g. "glc"); the others take a value ("offset:N").
// ConvertResult, when non-null, post-processes the parsed immediate
// (see ConvertOmodMul/ConvertOmodDiv/ConvertBoundCtrl above).
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
  {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
  {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
  {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
  {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00003961
// Try each entry of AMDGPUOptionalOperandTable in turn, dispatching to the
// specialized parser for the operand kind. Returns the first result that is
// not MatchOperand_NoMatch (i.e. a success or a hard parse failure).
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      // Array-valued modifiers, e.g. op_sel:[0,1].
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else {
      // Plain integer modifier, e.g. offset:16.
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
3991
Matt Arsenault12c53892016-11-15 19:58:54 +00003992OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003993 StringRef Name = Parser.getTok().getString();
3994 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00003995 return parseIntWithPrefix("mul", Operands,
3996 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003997 }
Matt Arsenault12c53892016-11-15 19:58:54 +00003998
3999 if (Name == "div") {
4000 return parseIntWithPrefix("div", Operands,
4001 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4002 }
4003
4004 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004005}
4006
// Identity conversion: emit defs first, then every remaining parsed operand
// as a register-or-immediate, with no optional-operand handling.
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1; // Operands[0] is the instruction mnemonic.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}
4016
4017void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00004018 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
4019 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00004020 cvtVOP3(Inst, Operands);
4021 } else {
4022 cvtId(Inst, Operands);
4023 }
4024}
4025
Sam Koltona3ec5c12016-10-07 14:46:06 +00004026static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4027 // 1. This operand is input modifiers
4028 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4029 // 2. This is not last operand
4030 && Desc.NumOperands > (OpNum + 1)
4031 // 3. Next operand is register class
4032 && Desc.OpInfo[OpNum + 1].RegClass != -1
4033 // 4. Next register is not tied to any other operand
4034 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4035}
4036
// Shared body of the VOP3 conversions: emit defs, then sources (with FP
// input modifiers where the instruction description calls for them), while
// recording the positions of optional immediate modifiers in OptionalIdx
// for the caller to emit in encoding order.
void AMDGPUAsmParser::cvtVOP3Impl(MCInst &Inst, const OperandVector &Operands,
                                  OptionalImmIndexMap &OptionalIdx) {
  unsigned I = 1; // Operands[0] is the instruction mnemonic.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Emits both the modifier immediate and the source operand.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }
}
4058
// Convert a parsed VOP3 instruction: common operand emission via
// cvtVOP3Impl(), then clamp/omod defaults, then the v_mac tied-src2 fixup.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  cvtVOP3Impl(Inst, Operands, OptionalIdx);

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Look up src2_modifiers on the generic pseudo (V_MAC_F{16,32}_e64),
    // not on the subtarget-specific opcode.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                 AMDGPU::V_MAC_F16_e64 :
                                 AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
4086
// Convert a parsed VOP3 instruction whose only optional operands are
// clamp/omod modifiers: emit defs and plain sources, collect the modifier
// positions, then append clamp and omod (defaulted if omitted).
void AMDGPUAsmParser::cvtVOP3OMod(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1; // Operands[0] is the instruction mnemonic.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isMod()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      Op.addRegOrImmOperands(Inst, 1);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
}
4108
// Convert a parsed VOP3P (packed) instruction. The operands are first
// emitted as for a normal VOP3; the op_sel/op_sel_hi/neg_lo/neg_hi
// immediates are then folded bit-by-bit into the per-source
// srcN_modifiers operands.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;

  cvtVOP3Impl(Inst, Operands, OptIdx);

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers
  int Opc = Inst.getOpcode();

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyClampSI);
  }

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
  // op_sel_hi defaults to -1 (all bits set) when omitted.
  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi, -1);

  // neg_lo/neg_hi only exist on some VOP3P instructions.
  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Fold bit J of each mask into the modifier operand of source J.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(ModVal);
  }
}
4176
Sam Koltondfa29f72016-03-09 12:29:31 +00004177//===----------------------------------------------------------------------===//
4178// dpp
4179//===----------------------------------------------------------------------===//
4180
4181bool AMDGPUOperand::isDPPCtrl() const {
4182 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4183 if (result) {
4184 int64_t Imm = getImm();
4185 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
4186 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
4187 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
4188 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
4189 (Imm == 0x130) ||
4190 (Imm == 0x134) ||
4191 (Imm == 0x138) ||
4192 (Imm == 0x13c) ||
4193 (Imm == 0x140) ||
4194 (Imm == 0x141) ||
4195 (Imm == 0x142) ||
4196 (Imm == 0x143);
4197 }
4198 return false;
4199}
4200
// A GPR index mode is a 4-bit immediate.
bool AMDGPUOperand::isGPRIdxMode() const {
  return isImm() && isUInt<4>(getImm());
}

// 16-bit immediate, accepted in either signed or unsigned range.
bool AMDGPUOperand::isS16Imm() const {
  return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
}

// Strictly unsigned 16-bit immediate.
bool AMDGPUOperand::isU16Imm() const {
  return isImm() && isUInt<16>(getImm());
}
4212
// Parse a DPP control operand and fold it into a single ImmTyDppCtrl
// immediate. Accepted forms:
//   row_mirror / row_half_mirror                     -> 0x140 / 0x141
//   quad_perm:[a,b,c,d]  (each lane selector 0..3)   -> 0x000..0x0ff
//   row_shl:N / row_shr:N / row_ror:N  (N = 1..15)   -> 0x101.., 0x111.., 0x121..
//   wave_shl:1 / wave_rol:1 / wave_shr:1 / wave_ror:1-> 0x130/0x134/0x138/0x13C
//   row_bcast:15 / row_bcast:31                      -> 0x142 / 0x143
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        // Known prefix but out-of-range value.
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
4312
// Defaults for omitted DPP modifiers: row_mask and bank_mask default to 0xf,
// bound_ctrl to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
4324
4325void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00004326 OptionalImmIndexMap OptionalIdx;
4327
4328 unsigned I = 1;
4329 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4330 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4331 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4332 }
4333
4334 for (unsigned E = Operands.size(); I != E; ++I) {
4335 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4336 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00004337 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00004338 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00004339 // Skip it.
4340 continue;
4341 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00004342 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00004343 } else if (Op.isDPPCtrl()) {
4344 Op.addImmOperands(Inst, 1);
4345 } else if (Op.isImm()) {
4346 // Handle optional arguments
4347 OptionalIdx[Op.getImmTy()] = I;
4348 } else {
4349 llvm_unreachable("Invalid operand type");
4350 }
4351 }
4352
Sam Koltondfa29f72016-03-09 12:29:31 +00004353 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4354 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4355 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Koltona3ec5c12016-10-07 14:46:06 +00004356
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004357 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00004358 // it has src2 register operand that is tied to dst operand
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004359 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
4360 Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004361 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004362 std::advance(
4363 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00004364 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4365 }
Sam Koltondfa29f72016-03-09 12:29:31 +00004366}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00004367
Sam Kolton3025e7f2016-04-26 13:33:56 +00004368//===----------------------------------------------------------------------===//
4369// sdwa
4370//===----------------------------------------------------------------------===//
4371
// Parse an SDWA select operand of the form "<Prefix>:<SEL>" where SEL is one
// of BYTE_0..BYTE_3, WORD_0, WORD_1 or DWORD, and push it as an immediate of
// the given type (dst_sel / src0_sel / src1_sel).
OperandMatchResultTy
AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
                              AMDGPUOperand::ImmTy Type) {
  using namespace llvm::AMDGPU::SDWA;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Value;
  OperandMatchResultTy res;

  res = parseStringWithPrefix(Prefix, Value);
  if (res != MatchOperand_Success) {
    return res;
  }

  int64_t Int;
  // 0xffffffff is used as the "no match" sentinel below.
  Int = StringSwitch<int64_t>(Value)
        .Case("BYTE_0", SdwaSel::BYTE_0)
        .Case("BYTE_1", SdwaSel::BYTE_1)
        .Case("BYTE_2", SdwaSel::BYTE_2)
        .Case("BYTE_3", SdwaSel::BYTE_3)
        .Case("WORD_0", SdwaSel::WORD_0)
        .Case("WORD_1", SdwaSel::WORD_1)
        .Case("DWORD", SdwaSel::DWORD)
        .Default(0xffffffff);
  Parser.Lex(); // eat last token

  if (Int == 0xffffffff) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
  return MatchOperand_Success;
}
4405
// Parse an SDWA "dst_unused:<MODE>" operand, where MODE is UNUSED_PAD,
// UNUSED_SEXT or UNUSED_PRESERVE, and push it as an ImmTySdwaDstUnused
// immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SDWA;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Value;
  OperandMatchResultTy res;

  res = parseStringWithPrefix("dst_unused", Value);
  if (res != MatchOperand_Success) {
    return res;
  }

  int64_t Int;
  // 0xffffffff is used as the "no match" sentinel below.
  Int = StringSwitch<int64_t>(Value)
        .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
        .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
        .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
        .Default(0xffffffff);
  Parser.Lex(); // eat last token

  if (Int == 0xffffffff) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
  return MatchOperand_Success;
}
4434
Sam Kolton945231a2016-06-10 09:57:59 +00004435void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00004436 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
Sam Kolton05ef1c92016-06-03 10:27:37 +00004437}
4438
// Convert a parsed VOP2 SDWA instruction; delegates to cvtSDWA with the
// VOP2 basic-encoding flag (vcc skipping left at its default).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
4442
// Convert a parsed VOP2b SDWA instruction (v_add_u32, v_addc_u32, ...).
// Uses the VOP2 encoding but asks cvtSDWA to skip the textual "vcc"
// dst/carry operands, which are implicit in the encoded instruction.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
4446
// Convert a parsed VOPC SDWA instruction. On VI the "vcc" dst operand is
// implicit in the encoding, so vcc skipping is enabled only for isVI().
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
4450
// Convert parsed SDWA operands into an MCInst, appending default values for
// any optional SDWA modifiers (clamp, omod, dst_sel, dst_unused, src0_sel,
// src1_sel) the user did not write explicitly.
//
// \p BasicInstType is the basic encoding of the instruction: one of
//    SIInstrFlags::VOP1, VOP2 or VOPC.
// \p skipVcc requests dropping textual "vcc" register operands that are
//    implicit in the real encoding (VOP2b, and VOPC on VI).
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;
  // Maps each optional-immediate type to its index in Operands, filled while
  // scanning and consumed by addOptionalImmOperand below.
  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  // Operands[0] is the mnemonic token; copy the explicit defs first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      // Note: the position tests below read Inst.getNumOperands(), i.e. how
      // many operands have been emitted so far, not the parse index I.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source operand with modifiers emits two MCOperands (mods + reg).
      Op.addRegWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // V_NOP_sdwa_vi has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // omod exists only on GFX9, and only for opcodes that define it.
      if (isGFX9() &&
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // omod exists only on GFX9, and only for opcodes that define it.
      if (isGFX9() &&
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      // VOPC SDWA has clamp only on VI.
      if (isVI()) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00004541
Tom Stellard45bb48e2015-06-13 03:28:10 +00004542/// Force static initialization.
4543extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00004544 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
4545 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00004546}
4547
4548#define GET_REGISTER_MATCHER
4549#define GET_MATCHER_IMPLEMENTATION
4550#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00004551
// This function must be defined after the auto-generated include above so
// that the MatchClassKind enum is in scope.
4554unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
4555 unsigned Kind) {
4556 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00004557 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00004558 // operand. This method checks if we are given immediate operand but expect to
4559 // get corresponding token.
4560 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
4561 switch (Kind) {
4562 case MCK_addr64:
4563 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
4564 case MCK_gds:
4565 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
4566 case MCK_glc:
4567 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
4568 case MCK_idxen:
4569 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
4570 case MCK_offen:
4571 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004572 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00004573 // When operands have expression values, they will return true for isToken,
4574 // because it is not possible to distinguish between a token and an
4575 // expression at parse time. MatchInstructionImpl() will always try to
4576 // match an operand as a token, when isToken returns true, and when the
4577 // name of the expression is not a valid token, the match will fail,
4578 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004579 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
4580 case MCK_SSrcF32:
4581 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00004582 case MCK_SoppBrTarget:
4583 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004584 case MCK_VReg32OrOff:
4585 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004586 case MCK_InterpSlot:
4587 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
4588 case MCK_Attr:
4589 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
4590 case MCK_AttrChan:
4591 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004592 default:
4593 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00004594 }
4595}