blob: da9d009c542b6de6c102b05d8e6155754156e512 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000018#include "llvm/ADT/APInt.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000021#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000023#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000025#include "llvm/CodeGen/MachineValueType.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000026#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000027#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000030#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000031#include "llvm/MC/MCInstrInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000034#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000035#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000036#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000040#include "llvm/MC/MCSymbol.h"
41#include "llvm/Support/Casting.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000042#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000043#include "llvm/Support/ELF.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000044#include "llvm/Support/ErrorHandling.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000045#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/raw_ostream.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetRegistry.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000049#include "llvm/Support/raw_ostream.h"
50#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000051#include <algorithm>
52#include <cassert>
53#include <cstdint>
54#include <cstring>
55#include <iterator>
56#include <map>
57#include <memory>
58#include <string>
59#include <vector>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000060
Tom Stellard45bb48e2015-06-13 03:28:10 +000061using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000062using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000063
64namespace {
65
Sam Kolton1eeb11b2016-09-09 14:44:04 +000066class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000068enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
69
Sam Kolton1eeb11b2016-09-09 14:44:04 +000070//===----------------------------------------------------------------------===//
71// Operand
72//===----------------------------------------------------------------------===//
73
Tom Stellard45bb48e2015-06-13 03:28:10 +000074class AMDGPUOperand : public MCParsedAsmOperand {
75 enum KindTy {
76 Token,
77 Immediate,
78 Register,
79 Expression
80 } Kind;
81
82 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000083 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000084
85public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000086 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
87 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000088
Sam Kolton5f10a132016-05-06 11:31:17 +000089 typedef std::unique_ptr<AMDGPUOperand> Ptr;
90
Sam Kolton945231a2016-06-10 09:57:59 +000091 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000092 bool Abs = false;
93 bool Neg = false;
94 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000095
96 bool hasFPModifiers() const { return Abs || Neg; }
97 bool hasIntModifiers() const { return Sext; }
98 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
99
100 int64_t getFPModifiersOperand() const {
101 int64_t Operand = 0;
102 Operand |= Abs ? SISrcMods::ABS : 0;
103 Operand |= Neg ? SISrcMods::NEG : 0;
104 return Operand;
105 }
106
107 int64_t getIntModifiersOperand() const {
108 int64_t Operand = 0;
109 Operand |= Sext ? SISrcMods::SEXT : 0;
110 return Operand;
111 }
112
113 int64_t getModifiersOperand() const {
114 assert(!(hasFPModifiers() && hasIntModifiers())
115 && "fp and int modifiers should not be used simultaneously");
116 if (hasFPModifiers()) {
117 return getFPModifiersOperand();
118 } else if (hasIntModifiers()) {
119 return getIntModifiersOperand();
120 } else {
121 return 0;
122 }
123 }
124
125 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
126 };
127
Tom Stellard45bb48e2015-06-13 03:28:10 +0000128 enum ImmTy {
129 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000130 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000131 ImmTyOffen,
132 ImmTyIdxen,
133 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000135 ImmTyOffset0,
136 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000137 ImmTyGLC,
138 ImmTySLC,
139 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000140 ImmTyClampSI,
141 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000142 ImmTyDppCtrl,
143 ImmTyDppRowMask,
144 ImmTyDppBankMask,
145 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000146 ImmTySdwaDstSel,
147 ImmTySdwaSrc0Sel,
148 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000149 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000150 ImmTyDMask,
151 ImmTyUNorm,
152 ImmTyDA,
153 ImmTyR128,
154 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000155 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000156 ImmTyExpCompr,
157 ImmTyExpVM,
Artem Tamazovd6468662016-04-25 14:13:51 +0000158 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000159 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000160 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000161 ImmTyInterpSlot,
162 ImmTyInterpAttr,
163 ImmTyAttrChan
Tom Stellard45bb48e2015-06-13 03:28:10 +0000164 };
165
166 struct TokOp {
167 const char *Data;
168 unsigned Length;
169 };
170
171 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000172 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000173 ImmTy Type;
174 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000175 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000176 };
177
178 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000179 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000180 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000181 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000182 };
183
184 union {
185 TokOp Tok;
186 ImmOp Imm;
187 RegOp Reg;
188 const MCExpr *Expr;
189 };
190
Tom Stellard45bb48e2015-06-13 03:28:10 +0000191 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000192 if (Kind == Token)
193 return true;
194
195 if (Kind != Expression || !Expr)
196 return false;
197
198 // When parsing operands, we can't always tell if something was meant to be
199 // a token, like 'gds', or an expression that references a global variable.
200 // In this case, we assume the string is an expression, and if we need to
201 // interpret is a token, then we treat the symbol name as the token.
202 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000203 }
204
205 bool isImm() const override {
206 return Kind == Immediate;
207 }
208
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000209 bool isInlinableImm(MVT type) const;
210 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000211
Tom Stellard45bb48e2015-06-13 03:28:10 +0000212 bool isRegKind() const {
213 return Kind == Register;
214 }
215
216 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000217 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000218 }
219
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000220 bool isRegOrImmWithInputMods(MVT type) const {
221 return isRegKind() || isInlinableImm(type);
222 }
223
Matt Arsenault4bd72362016-12-10 00:39:12 +0000224 bool isRegOrImmWithInt16InputMods() const {
225 return isRegOrImmWithInputMods(MVT::i16);
226 }
227
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000228 bool isRegOrImmWithInt32InputMods() const {
229 return isRegOrImmWithInputMods(MVT::i32);
230 }
231
232 bool isRegOrImmWithInt64InputMods() const {
233 return isRegOrImmWithInputMods(MVT::i64);
234 }
235
Matt Arsenault4bd72362016-12-10 00:39:12 +0000236 bool isRegOrImmWithFP16InputMods() const {
237 return isRegOrImmWithInputMods(MVT::f16);
238 }
239
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000240 bool isRegOrImmWithFP32InputMods() const {
241 return isRegOrImmWithInputMods(MVT::f32);
242 }
243
244 bool isRegOrImmWithFP64InputMods() const {
245 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000246 }
247
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000248 bool isVReg32OrOff() const {
249 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
250 }
251
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000252 bool isImmTy(ImmTy ImmT) const {
253 return isImm() && Imm.Type == ImmT;
254 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000255
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000256 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000257 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000258 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000259
Sam Kolton945231a2016-06-10 09:57:59 +0000260 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
261 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
262 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000263 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
264 bool isDA() const { return isImmTy(ImmTyDA); }
265 bool isR128() const { return isImmTy(ImmTyUNorm); }
266 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000267 bool isOff() const { return isImmTy(ImmTyOff); }
268 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000269 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
270 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000271 bool isOffen() const { return isImmTy(ImmTyOffen); }
272 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
273 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
274 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
275 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
276 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000277 bool isGDS() const { return isImmTy(ImmTyGDS); }
278 bool isGLC() const { return isImmTy(ImmTyGLC); }
279 bool isSLC() const { return isImmTy(ImmTySLC); }
280 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000281 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
282 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
283 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
284 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
285 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
286 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
287 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000288 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
289 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
290 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000291
Sam Kolton945231a2016-06-10 09:57:59 +0000292 bool isMod() const {
293 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000294 }
295
296 bool isRegOrImm() const {
297 return isReg() || isImm();
298 }
299
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000300 bool isRegClass(unsigned RCID) const;
301
Matt Arsenault4bd72362016-12-10 00:39:12 +0000302 bool isSCSrcB16() const {
303 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i16);
304 }
305
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000306 bool isSCSrcB32() const {
307 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000308 }
309
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000310 bool isSCSrcB64() const {
311 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000312 }
313
Matt Arsenault4bd72362016-12-10 00:39:12 +0000314 bool isSCSrcF16() const {
315 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f16);
316 }
317
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000318 bool isSCSrcF32() const {
319 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000320 }
321
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000322 bool isSCSrcF64() const {
323 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000324 }
325
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000326 bool isSSrcB32() const {
327 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
328 }
329
Matt Arsenault4bd72362016-12-10 00:39:12 +0000330 bool isSSrcB16() const {
331 return isSCSrcB16() || isLiteralImm(MVT::i16);
332 }
333
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000334 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000335 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
336 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000337 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000338 }
339
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000340 bool isSSrcF32() const {
341 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000342 }
343
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000344 bool isSSrcF64() const {
345 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000346 }
347
Matt Arsenault4bd72362016-12-10 00:39:12 +0000348 bool isSSrcF16() const {
349 return isSCSrcB16() || isLiteralImm(MVT::f16);
350 }
351
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000352 bool isVCSrcB32() const {
353 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000354 }
355
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000356 bool isVCSrcB64() const {
357 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
358 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000359
Matt Arsenault4bd72362016-12-10 00:39:12 +0000360 bool isVCSrcB16() const {
361 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i16);
362 }
363
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000364 bool isVCSrcF32() const {
365 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
366 }
367
368 bool isVCSrcF64() const {
369 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
370 }
371
Matt Arsenault4bd72362016-12-10 00:39:12 +0000372 bool isVCSrcF16() const {
373 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f16);
374 }
375
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000376 bool isVSrcB32() const {
377 return isVCSrcF32() || isLiteralImm(MVT::i32);
378 }
379
380 bool isVSrcB64() const {
381 return isVCSrcF64() || isLiteralImm(MVT::i64);
382 }
383
Matt Arsenault4bd72362016-12-10 00:39:12 +0000384 bool isVSrcB16() const {
385 return isVCSrcF16() || isLiteralImm(MVT::i16);
386 }
387
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000388 bool isVSrcF32() const {
389 return isVCSrcF32() || isLiteralImm(MVT::f32);
390 }
391
392 bool isVSrcF64() const {
393 return isVCSrcF64() || isLiteralImm(MVT::f64);
394 }
395
Matt Arsenault4bd72362016-12-10 00:39:12 +0000396 bool isVSrcF16() const {
397 return isVCSrcF16() || isLiteralImm(MVT::f16);
398 }
399
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000400 bool isKImmFP32() const {
401 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000402 }
403
Matt Arsenault4bd72362016-12-10 00:39:12 +0000404 bool isKImmFP16() const {
405 return isLiteralImm(MVT::f16);
406 }
407
Tom Stellard45bb48e2015-06-13 03:28:10 +0000408 bool isMem() const override {
409 return false;
410 }
411
412 bool isExpr() const {
413 return Kind == Expression;
414 }
415
416 bool isSoppBrTarget() const {
417 return isExpr() || isImm();
418 }
419
Sam Kolton945231a2016-06-10 09:57:59 +0000420 bool isSWaitCnt() const;
421 bool isHwreg() const;
422 bool isSendMsg() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000423 bool isSMRDOffset8() const;
424 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000425 bool isSMRDLiteralOffset() const;
426 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000427 bool isGPRIdxMode() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000428
Tom Stellard89049702016-06-15 02:54:14 +0000429 StringRef getExpressionAsToken() const {
430 assert(isExpr());
431 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
432 return S->getSymbol().getName();
433 }
434
Sam Kolton945231a2016-06-10 09:57:59 +0000435 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000436 assert(isToken());
437
438 if (Kind == Expression)
439 return getExpressionAsToken();
440
Sam Kolton945231a2016-06-10 09:57:59 +0000441 return StringRef(Tok.Data, Tok.Length);
442 }
443
444 int64_t getImm() const {
445 assert(isImm());
446 return Imm.Val;
447 }
448
449 enum ImmTy getImmTy() const {
450 assert(isImm());
451 return Imm.Type;
452 }
453
454 unsigned getReg() const override {
455 return Reg.RegNo;
456 }
457
Tom Stellard45bb48e2015-06-13 03:28:10 +0000458 SMLoc getStartLoc() const override {
459 return StartLoc;
460 }
461
Peter Collingbourne0da86302016-10-10 22:49:37 +0000462 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000463 return EndLoc;
464 }
465
Sam Kolton945231a2016-06-10 09:57:59 +0000466 Modifiers getModifiers() const {
467 assert(isRegKind() || isImmTy(ImmTyNone));
468 return isRegKind() ? Reg.Mods : Imm.Mods;
469 }
470
471 void setModifiers(Modifiers Mods) {
472 assert(isRegKind() || isImmTy(ImmTyNone));
473 if (isRegKind())
474 Reg.Mods = Mods;
475 else
476 Imm.Mods = Mods;
477 }
478
479 bool hasModifiers() const {
480 return getModifiers().hasModifiers();
481 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000482
Sam Kolton945231a2016-06-10 09:57:59 +0000483 bool hasFPModifiers() const {
484 return getModifiers().hasFPModifiers();
485 }
486
487 bool hasIntModifiers() const {
488 return getModifiers().hasIntModifiers();
489 }
490
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000491 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000492
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000493 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
494
Matt Arsenault4bd72362016-12-10 00:39:12 +0000495 template <unsigned Bitwidth>
496 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
497
498 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
499 addKImmFPOperands<16>(Inst, N);
500 }
501
502 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
503 addKImmFPOperands<32>(Inst, N);
504 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000505
506 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000507
508 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
509 if (isRegKind())
510 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000511 else if (isExpr())
512 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000513 else
514 addImmOperands(Inst, N);
515 }
516
517 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
518 Modifiers Mods = getModifiers();
519 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
520 if (isRegKind()) {
521 addRegOperands(Inst, N);
522 } else {
523 addImmOperands(Inst, N, false);
524 }
525 }
526
527 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
528 assert(!hasIntModifiers());
529 addRegOrImmWithInputModsOperands(Inst, N);
530 }
531
532 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
533 assert(!hasFPModifiers());
534 addRegOrImmWithInputModsOperands(Inst, N);
535 }
536
537 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
538 if (isImm())
539 addImmOperands(Inst, N);
540 else {
541 assert(isExpr());
542 Inst.addOperand(MCOperand::createExpr(Expr));
543 }
544 }
545
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000546 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000547 switch (Type) {
548 case ImmTyNone: OS << "None"; break;
549 case ImmTyGDS: OS << "GDS"; break;
550 case ImmTyOffen: OS << "Offen"; break;
551 case ImmTyIdxen: OS << "Idxen"; break;
552 case ImmTyAddr64: OS << "Addr64"; break;
553 case ImmTyOffset: OS << "Offset"; break;
554 case ImmTyOffset0: OS << "Offset0"; break;
555 case ImmTyOffset1: OS << "Offset1"; break;
556 case ImmTyGLC: OS << "GLC"; break;
557 case ImmTySLC: OS << "SLC"; break;
558 case ImmTyTFE: OS << "TFE"; break;
559 case ImmTyClampSI: OS << "ClampSI"; break;
560 case ImmTyOModSI: OS << "OModSI"; break;
561 case ImmTyDppCtrl: OS << "DppCtrl"; break;
562 case ImmTyDppRowMask: OS << "DppRowMask"; break;
563 case ImmTyDppBankMask: OS << "DppBankMask"; break;
564 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000565 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
566 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
567 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000568 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
569 case ImmTyDMask: OS << "DMask"; break;
570 case ImmTyUNorm: OS << "UNorm"; break;
571 case ImmTyDA: OS << "DA"; break;
572 case ImmTyR128: OS << "R128"; break;
573 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000574 case ImmTyOff: OS << "Off"; break;
575 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000576 case ImmTyExpCompr: OS << "ExpCompr"; break;
577 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000578 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000579 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000580 case ImmTyInterpSlot: OS << "InterpSlot"; break;
581 case ImmTyInterpAttr: OS << "InterpAttr"; break;
582 case ImmTyAttrChan: OS << "AttrChan"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000583 }
584 }
585
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000586 void print(raw_ostream &OS) const override {
587 switch (Kind) {
588 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000589 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000590 break;
591 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000592 OS << '<' << getImm();
593 if (getImmTy() != ImmTyNone) {
594 OS << " type: "; printImmTy(OS, getImmTy());
595 }
Sam Kolton945231a2016-06-10 09:57:59 +0000596 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000597 break;
598 case Token:
599 OS << '\'' << getToken() << '\'';
600 break;
601 case Expression:
602 OS << "<expr " << *Expr << '>';
603 break;
604 }
605 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000606
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000607 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
608 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000609 enum ImmTy Type = ImmTyNone,
610 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000611 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000612 Op->Imm.Val = Val;
613 Op->Imm.IsFPImm = IsFPImm;
614 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000615 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000616 Op->StartLoc = Loc;
617 Op->EndLoc = Loc;
618 return Op;
619 }
620
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000621 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
622 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000623 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000624 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000625 Res->Tok.Data = Str.data();
626 Res->Tok.Length = Str.size();
627 Res->StartLoc = Loc;
628 Res->EndLoc = Loc;
629 return Res;
630 }
631
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000632 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
633 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000634 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000635 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000636 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000637 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000638 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000639 Op->Reg.IsForcedVOP3 = ForceVOP3;
640 Op->StartLoc = S;
641 Op->EndLoc = E;
642 return Op;
643 }
644
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000645 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
646 const class MCExpr *Expr, SMLoc S) {
647 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000648 Op->Expr = Expr;
649 Op->StartLoc = S;
650 Op->EndLoc = S;
651 return Op;
652 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000653};
654
Sam Kolton945231a2016-06-10 09:57:59 +0000655raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
656 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
657 return OS;
658}
659
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000660//===----------------------------------------------------------------------===//
661// AsmParser
662//===----------------------------------------------------------------------===//
663
Artem Tamazova01cce82016-12-27 16:00:11 +0000664// Holds info related to the current kernel, e.g. count of SGPRs used.
665// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
666// .amdgpu_hsa_kernel or at EOF.
667class KernelScopeInfo {
668 int SgprIndexUnusedMin;
669 int VgprIndexUnusedMin;
670 MCContext *Ctx;
671
672 void usesSgprAt(int i) {
673 if (i >= SgprIndexUnusedMin) {
674 SgprIndexUnusedMin = ++i;
675 if (Ctx) {
676 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
677 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
678 }
679 }
680 }
681 void usesVgprAt(int i) {
682 if (i >= VgprIndexUnusedMin) {
683 VgprIndexUnusedMin = ++i;
684 if (Ctx) {
685 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
686 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
687 }
688 }
689 }
690public:
691 KernelScopeInfo() : SgprIndexUnusedMin(-1), VgprIndexUnusedMin(-1), Ctx(nullptr)
692 {}
693 void initialize(MCContext &Context) {
694 Ctx = &Context;
695 usesSgprAt(SgprIndexUnusedMin = -1);
696 usesVgprAt(VgprIndexUnusedMin = -1);
697 }
698 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
699 switch (RegKind) {
700 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
701 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
702 default: break;
703 }
704 }
705};
706
Tom Stellard45bb48e2015-06-13 03:28:10 +0000707class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000708 const MCInstrInfo &MII;
709 MCAsmParser &Parser;
710
711 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000712 bool ForcedDPP;
713 bool ForcedSDWA;
Artem Tamazova01cce82016-12-27 16:00:11 +0000714 KernelScopeInfo KernelScope;
Matt Arsenault68802d32015-11-05 03:11:27 +0000715
Tom Stellard45bb48e2015-06-13 03:28:10 +0000716 /// @name Auto-generated Match Functions
717 /// {
718
719#define GET_ASSEMBLER_HEADER
720#include "AMDGPUGenAsmMatcher.inc"
721
722 /// }
723
Tom Stellard347ac792015-06-26 21:15:07 +0000724private:
Artem Tamazov25478d82016-12-29 15:41:52 +0000725 bool ParseAsAbsoluteExpression(uint32_t &Ret);
Tom Stellard347ac792015-06-26 21:15:07 +0000726 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
727 bool ParseDirectiveHSACodeObjectVersion();
728 bool ParseDirectiveHSACodeObjectISA();
Sam Kolton69c8aa22016-12-19 11:43:15 +0000729 bool ParseDirectiveRuntimeMetadata();
Tom Stellardff7416b2015-06-26 21:58:31 +0000730 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
731 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000732 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000733 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000734 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000735 bool ParseDirectiveAMDGPUHsaModuleGlobal();
736 bool ParseDirectiveAMDGPUHsaProgramGlobal();
737 bool ParseSectionDirectiveHSADataGlobalAgent();
738 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000739 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000740 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
Artem Tamazova01cce82016-12-27 16:00:11 +0000741 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth, unsigned *DwordRegIndex);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000742 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000743
Tom Stellard45bb48e2015-06-13 03:28:10 +0000744public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000745 enum AMDGPUMatchResultTy {
746 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
747 };
748
  /// Construct the AMDGPU assembly parser.
  ///
  /// If the subtarget was created with no explicit features, defaults to the
  /// SOUTHERN_ISLANDS feature set. Also pre-defines the
  /// .option.machine_version_{major,minor,stepping} symbols from the ISA
  /// version so assembly sources can reference them, and initializes the
  /// kernel scope tracking.
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0),
        ForcedDPP(false),
        ForcedSDWA(false) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
    }
    KernelScope.initialize(getContext());
  }
781
  /// True if the subtarget is Southern Islands.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  /// True if the subtarget is Sea Islands.
  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  /// True if the subtarget is Volcanic Islands.
  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  /// True if 1/(2*pi) is available as an inline immediate on this subtarget.
  bool hasInv2PiInlineImm() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  /// Whether SGPR102/SGPR103 may be used; they are not available on VI.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }
801
  /// Return the streamer that emits AMDGPU-specific directives.
  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  /// Register info for the current target, usable from const methods.
  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  /// Instruction info handed to us at construction.
  const MCInstrInfo *getMII() const {
    return &MII;
  }
816
  // Setters/getters for encodings forced by the mnemonic spelling
  // (explicit 64-bit VOP3 encoding, DPP, SDWA).
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; } // 64-bit encoding requested
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
Matt Arsenault5f45e782017-01-09 18:44:11 +0000825 ArrayRef<unsigned> getMatchedVariants() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000826
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000827 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000828 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
829 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000830 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
831 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000832 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
833 OperandVector &Operands, MCStreamer &Out,
834 uint64_t &ErrorInfo,
835 bool MatchingInlineAsm) override;
836 bool ParseDirective(AsmToken DirectiveID) override;
837 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000838 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000839 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
840 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000841 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000842
Sam Kolton11de3702016-05-24 12:38:33 +0000843 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Eugene Zelenko2bc2f332016-12-09 22:06:55 +0000844 OperandMatchResultTy
845 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
846 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
847 bool (*ConvertResult)(int64_t &) = nullptr);
848 OperandMatchResultTy
849 parseNamedBit(const char *Name, OperandVector &Operands,
850 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
851 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
852 StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000853
Sam Kolton1bdcef72016-05-23 09:59:02 +0000854 OperandMatchResultTy parseImm(OperandVector &Operands);
855 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000856 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
857 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000858 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +0000859
Tom Stellard45bb48e2015-06-13 03:28:10 +0000860 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
861 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000862 void cvtExp(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000863
864 bool parseCnt(int64_t &IntVal);
865 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000866 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000867
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000868private:
869 struct OperandInfoTy {
870 int64_t Id;
871 bool IsSymbolic;
872 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
873 };
Sam Kolton11de3702016-05-24 12:38:33 +0000874
Artem Tamazov6edc1352016-05-26 17:00:33 +0000875 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
876 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000877
878 void errorExpTgt();
879 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
880
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000881public:
Sam Kolton11de3702016-05-24 12:38:33 +0000882 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
883
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000884 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000885 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000886 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
887 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000888 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
889
  // MUBUF operand-conversion wrappers over cvtMubufImpl(Inst, Operands,
  // IsAtomic, IsAtomicReturn): plain, atomic, and atomic-with-return forms.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000893 AMDGPUOperand::Ptr defaultGLC() const;
894 AMDGPUOperand::Ptr defaultSLC() const;
895 AMDGPUOperand::Ptr defaultTFE() const;
896
Sam Kolton5f10a132016-05-06 11:31:17 +0000897 AMDGPUOperand::Ptr defaultDMask() const;
898 AMDGPUOperand::Ptr defaultUNorm() const;
899 AMDGPUOperand::Ptr defaultDA() const;
900 AMDGPUOperand::Ptr defaultR128() const;
901 AMDGPUOperand::Ptr defaultLWE() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000902 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
903 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +0000904 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +0000905
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000906 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
907
Tom Stellarda90b9522016-02-11 03:28:15 +0000908 void cvtId(MCInst &Inst, const OperandVector &Operands);
909 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000910 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000911
912 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000913 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000914
Sam Kolton11de3702016-05-24 12:38:33 +0000915 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000916 AMDGPUOperand::Ptr defaultRowMask() const;
917 AMDGPUOperand::Ptr defaultBankMask() const;
918 AMDGPUOperand::Ptr defaultBoundCtrl() const;
919 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000920
Sam Kolton05ef1c92016-06-03 10:27:37 +0000921 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
922 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000923 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000924 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
925 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +0000926 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
927 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
928 uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000929};
930
// Table entry describing an optional instruction operand.
struct OptionalOperand {
  const char *Name;                // Keyword as written in assembly source.
  AMDGPUOperand::ImmTy Type;       // Immediate kind the parsed value is tagged with.
  bool IsBit;                      // True if the operand is a single-bit flag.
  bool (*ConvertResult)(int64_t&); // Optional post-parse value fixup; may be null
                                   // (cf. the nullptr default in parseIntWithPrefix).
};
937
Eugene Zelenko2bc2f332016-12-09 22:06:55 +0000938} // end anonymous namespace
939
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000940// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +0000941static const fltSemantics *getFltSemantics(unsigned Size) {
942 switch (Size) {
943 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +0000944 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +0000945 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +0000946 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +0000947 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +0000948 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000949 default:
950 llvm_unreachable("unsupported fp type");
951 }
952}
953
// Overload: derive the byte size from the machine value type.
static const fltSemantics *getFltSemantics(MVT VT) {
  return getFltSemantics(VT.getSizeInBits() / 8);
}
957
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000958//===----------------------------------------------------------------------===//
959// Operand
960//===----------------------------------------------------------------------===//
961
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000962static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
963 bool Lost;
964
965 // Convert literal to single precision
966 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
967 APFloat::rmNearestTiesToEven,
968 &Lost);
969 // We allow precision lost but not overflow or underflow
970 if (Status != APFloat::opOK &&
971 Lost &&
972 ((Status & APFloat::opOverflow) != 0 ||
973 (Status & APFloat::opUnderflow) != 0)) {
974 return false;
975 }
976
977 return true;
978}
979
/// Whether this immediate can be encoded as an inline constant for an
/// operand of machine type \p type.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operand: round the double literal to the operand's own
    // semantics first; give up if that rounding overflows/underflows.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // 16-bit (scalar or vector element) operand: test the low 16 bits,
  // sign-extended.
  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // Everything else is treated as a 32-bit operand.
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1024
/// Whether this immediate can be encoded as a (non-inline) literal for an
/// operand of machine type \p type.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    // A 64-bit operand is still validated against a 32-bit literal range.
    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept this literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower FP operand: acceptable iff the double literal converts without
  // overflow/underflow.
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1058
/// True if this operand is a register contained in register class \p RCID.
bool AMDGPUOperand::isRegClass(unsigned RCID) const {
  return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
}
1062
/// Append this immediate to \p Inst. If \p ApplyModifiers is set, the parsed
/// negation modifier is folded into the value first (sign flip on the FP bit
/// pattern, arithmetic negation for integers).
void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
  int64_t Val = Imm.Val;
  if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
    // Apply modifiers to immediate value. Only negate can get here
    if (Imm.IsFPImm) {
      APFloat F(BitsToDouble(Val));
      F.changeSign();
      Val = F.bitcastToAPInt().getZExtValue();
    } else {
      Val = -Val;
    }
  }

  // SI source operands may need inline/literal encoding handling; everything
  // else is emitted verbatim.
  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
                             Inst.getNumOperands())) {
    addLiteralImmOperand(Inst, Val);
  } else {
    Inst.addOperand(MCOperand::createImm(Val));
  }
}
1083
/// Append \p Val to \p Inst as an inline constant or literal, encoded
/// according to the expected size of the next operand (2/4/8 bytes).
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  auto OpSize = AMDGPU::getOperandSize(InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    APInt Literal(64, Val);

    switch (OpSize) {
    case 8: {
      // Inline constants are emitted unchanged.
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
            "Can't encode literal as exact 64-bit floating-point operand. "
            "Low 32-bits will be set to zero");
        }

        // Only the high 32 bits of the literal are encoded.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");
    }
    case 4:
    case 2: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getFltSemantics(OpSize),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpSize) {
  case 4: {
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Truncate to the low 32 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;
  }
  case 8: {
    if (AMDGPU::isInlinableLiteral64(Val,
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Non-inline 64-bit literals carry only their low 32 bits.
    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;
  }
  case 2: {
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Truncate to the low 16 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1180
/// Append a fixed-width k-immediate FP operand of \p Bitwidth bits.
/// Integer tokens are truncated to the low \p Bitwidth bits; FP tokens are
/// rounded to the target semantics and emitted as their bit pattern.
template <unsigned Bitwidth>
void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
  APInt Literal(64, Imm.Val);

  if (!Imm.IsFPImm) {
    // We got int literal token.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
    return;
  }

  bool Lost;
  APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
  FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
                    APFloat::rmNearestTiesToEven, &Lost);
  Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
}
1197
/// Append this register operand, translated to the subtarget-specific MC
/// register number.
void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
  Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
}
1201
1202//===----------------------------------------------------------------------===//
1203// AsmParser
1204//===----------------------------------------------------------------------===//
1205
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001206static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1207 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001208 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001209 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001210 case 1: return AMDGPU::VGPR_32RegClassID;
1211 case 2: return AMDGPU::VReg_64RegClassID;
1212 case 3: return AMDGPU::VReg_96RegClassID;
1213 case 4: return AMDGPU::VReg_128RegClassID;
1214 case 8: return AMDGPU::VReg_256RegClassID;
1215 case 16: return AMDGPU::VReg_512RegClassID;
1216 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001217 } else if (Is == IS_TTMP) {
1218 switch (RegWidth) {
1219 default: return -1;
1220 case 1: return AMDGPU::TTMP_32RegClassID;
1221 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001222 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001223 }
1224 } else if (Is == IS_SGPR) {
1225 switch (RegWidth) {
1226 default: return -1;
1227 case 1: return AMDGPU::SGPR_32RegClassID;
1228 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001229 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001230 case 8: return AMDGPU::SReg_256RegClassID;
1231 case 16: return AMDGPU::SReg_512RegClassID;
1232 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001233 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001234 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001235}
1236
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001237static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001238 return StringSwitch<unsigned>(RegName)
1239 .Case("exec", AMDGPU::EXEC)
1240 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001241 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001242 .Case("m0", AMDGPU::M0)
1243 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001244 .Case("tba", AMDGPU::TBA)
1245 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001246 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1247 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001248 .Case("vcc_lo", AMDGPU::VCC_LO)
1249 .Case("vcc_hi", AMDGPU::VCC_HI)
1250 .Case("exec_lo", AMDGPU::EXEC_LO)
1251 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001252 .Case("tma_lo", AMDGPU::TMA_LO)
1253 .Case("tma_hi", AMDGPU::TMA_HI)
1254 .Case("tba_lo", AMDGPU::TBA_LO)
1255 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001256 .Default(0);
1257}
1258
1259bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001260 auto R = parseRegister();
1261 if (!R) return true;
1262 assert(R->isReg());
1263 RegNo = R->getReg();
1264 StartLoc = R->getStartLoc();
1265 EndLoc = R->getEndLoc();
1266 return false;
1267}
1268
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001269bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1270{
1271 switch (RegKind) {
1272 case IS_SPECIAL:
1273 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1274 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1275 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1276 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1277 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1278 return false;
1279 case IS_VGPR:
1280 case IS_SGPR:
1281 case IS_TTMP:
1282 if (Reg1 != Reg + RegWidth) { return false; }
1283 RegWidth++;
1284 return true;
1285 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001286 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001287 }
1288}
1289
/// Parse one register reference from the token stream.
///
/// Accepted forms: a special register name (see getSpecialRegForName), a
/// single register "vN"/"sN"/"ttmpN", a range "v[LO:HI]" or "v[N]", and a
/// bracketed list of consecutive single registers "[s0,s1,s2,s3]".
/// On success fills RegKind/Reg/RegNum/RegWidth and, if \p DwordRegIndex is
/// non-null, the register's dword index before class scaling.
/// Returns false on any parse or validation failure.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth, unsigned *DwordRegIndex)
{
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's', or "ttmp"; anything else is not a
      // register.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // The first element seeds Reg/RegWidth; each element must be a single
    // register of the same kind, consecutive with the previous one.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Validate the parsed register and resolve it to an MC register number.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    // Scale the dword index down to an index into the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1417
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001418std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001419 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001420 SMLoc StartLoc = Tok.getLoc();
1421 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001422 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001423 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001424
Artem Tamazova01cce82016-12-27 16:00:11 +00001425 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001426 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001427 }
Artem Tamazova01cce82016-12-27 16:00:11 +00001428 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001429 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001430}
1431
Alex Bradbury58eba092016-11-01 16:32:05 +00001432OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001433AMDGPUAsmParser::parseImm(OperandVector &Operands) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001434 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001435 bool Minus = false;
1436 if (getLexer().getKind() == AsmToken::Minus) {
1437 Minus = true;
1438 Parser.Lex();
1439 }
1440
1441 SMLoc S = Parser.getTok().getLoc();
1442 switch(getLexer().getKind()) {
1443 case AsmToken::Integer: {
1444 int64_t IntVal;
1445 if (getParser().parseAbsoluteExpression(IntVal))
1446 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001447 if (Minus)
1448 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001449 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001450 return MatchOperand_Success;
1451 }
1452 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001453 int64_t IntVal;
1454 if (getParser().parseAbsoluteExpression(IntVal))
1455 return MatchOperand_ParseFail;
1456
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001457 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001458 if (Minus)
1459 F.changeSign();
1460 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001461 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001462 AMDGPUOperand::ImmTyNone, true));
1463 return MatchOperand_Success;
1464 }
1465 default:
1466 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1467 }
1468}
1469
Alex Bradbury58eba092016-11-01 16:32:05 +00001470OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001471AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1472 auto res = parseImm(Operands);
1473 if (res != MatchOperand_NoMatch) {
1474 return res;
1475 }
1476
1477 if (auto R = parseRegister()) {
1478 assert(R->isReg());
1479 R->Reg.IsForcedVOP3 = isForcedVOP3();
1480 Operands.push_back(std::move(R));
1481 return MatchOperand_Success;
1482 }
1483 return MatchOperand_ParseFail;
1484}
1485
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // Parses a register or immediate with optional floating-point input
  // modifiers: -x, |x|, abs(x), -|x|, -abs(x).
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // Named form: abs(<operand>)
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // Bar form: |<operand>| -- mutually exclusive with abs(...).
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Negate) {
    Mods.Neg = true;
  }
  // Consume the closing '|' of the bar form.
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  // Consume the closing ')' of the abs(...) form.
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach the collected modifiers to the operand that was just pushed.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1549
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  // Parses a register or immediate with an optional integer input modifier,
  // the sign-extension form: sext(<operand>).
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  // Consume the closing ')' of the sext(...) form.
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the modifier to the operand that was just pushed.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001586
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001587OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1588 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1589 if (Reg) {
1590 Operands.push_back(std::move(Reg));
1591 return MatchOperand_Success;
1592 }
1593
1594 const AsmToken &Tok = Parser.getTok();
1595 if (Tok.getString() == "off") {
1596 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1597 AMDGPUOperand::ImmTyOff, false));
1598 Parser.Lex();
1599 return MatchOperand_Success;
1600 }
1601
1602 return MatchOperand_NoMatch;
1603}
1604
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // Reject the candidate when its encoding class disagrees with the encoding
  // the user forced via a mnemonic suffix (_e32/_e64/_dpp/_sdwa).
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // VOP3 instructions flagged VOPAsmPrefer32Bit must be emitted as e32 unless
  // the user explicitly asked for the 64-bit encoding.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
1633
Matt Arsenault5f45e782017-01-09 18:44:11 +00001634// What asm variants we should check
1635ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
1636 if (getForcedEncodingSize() == 32) {
1637 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
1638 return makeArrayRef(Variants);
1639 }
1640
1641 if (isForcedVOP3()) {
1642 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
1643 return makeArrayRef(Variants);
1644 }
1645
1646 if (isForcedSDWA()) {
1647 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA};
1648 return makeArrayRef(Variants);
1649 }
1650
1651 if (isForcedDPP()) {
1652 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
1653 return makeArrayRef(Variants);
1654 }
1655
1656 static const unsigned Variants[] = {
1657 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
1658 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::DPP
1659 };
1660
1661 return makeArrayRef(Variants);
1662}
1663
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // Try each applicable asm variant in turn, keeping the most specific match
  // status seen so that the final diagnostic is as precise as possible.
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // A successful match ends the search immediately.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when ErrorInfo names one.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1725
Artem Tamazov25478d82016-12-29 15:41:52 +00001726bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
1727 int64_t Tmp = -1;
1728 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
1729 return true;
1730 }
1731 if (getParser().parseAbsoluteExpression(Tmp)) {
1732 return true;
1733 }
1734 Ret = static_cast<uint32_t>(Tmp);
1735 return false;
1736}
1737
1738
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  // Parses "<major> , <minor>"; returns true (with a diagnostic) on failure.
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
1753
1754bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1755
1756 uint32_t Major;
1757 uint32_t Minor;
1758
1759 if (ParseDirectiveMajorMinor(Major, Minor))
1760 return true;
1761
1762 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1763 return false;
1764}
1765
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  // .hsa_code_object_isa [<major>,<minor>,<stepping>,"<vendor>","<arch>"]
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch names are quoted strings; keep the contents only.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1817
bool AMDGPUAsmParser::ParseDirectiveRuntimeMetadata() {
  // Collects everything between .amdgpu_runtime_metadata and
  // .end_amdgpu_runtime_metadata verbatim and forwards it to the streamer.
  std::string Metadata;
  raw_string_ostream MS(Metadata);

  // Whitespace is significant in the metadata body, so stop skipping it.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Preserve runs of spaces verbatim.
    while (getLexer().is(AsmToken::Space)) {
      MS << ' ';
      Lex();
    }

    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == ".end_amdgpu_runtime_metadata") {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the rest of the statement and a separator, then advance.
    MS << Parser.parseStringToEndOfStatement()
       << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal whitespace handling before returning.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd)
    return TokError("expected directive .end_amdgpu_runtime_metadata not found");

  MS.flush();

  getTargetStreamer().EmitRuntimeMetadata(Metadata);

  return false;
}
1857
Tom Stellardff7416b2015-06-26 21:58:31 +00001858bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1859 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001860 SmallString<40> ErrStr;
1861 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001862 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001863 return TokError(Err.str());
1864 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001865 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001866 return false;
1867}
1868
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  // Parses the body of .amd_kernel_code_t ... .end_amd_kernel_code_t: a list
  // of field assignments applied on top of the subtarget's default header.
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1896
Tom Stellarde135ffd2015-09-25 21:41:28 +00001897bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1898 getParser().getStreamer().SwitchSection(
1899 AMDGPU::getHSATextSection(getContext()));
1900 return false;
1901}
1902
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001903bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1904 if (getLexer().isNot(AsmToken::Identifier))
1905 return TokError("expected symbol name");
1906
1907 StringRef KernelName = Parser.getTok().getString();
1908
1909 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1910 ELF::STT_AMDGPU_HSA_KERNEL);
1911 Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00001912 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001913 return false;
1914}
1915
Tom Stellard00f2f912015-12-02 19:47:57 +00001916bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1917 if (getLexer().isNot(AsmToken::Identifier))
1918 return TokError("expected symbol name");
1919
1920 StringRef GlobalName = Parser.getTok().getIdentifier();
1921
1922 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1923 Lex();
1924 return false;
1925}
1926
1927bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1928 if (getLexer().isNot(AsmToken::Identifier))
1929 return TokError("expected symbol name");
1930
1931 StringRef GlobalName = Parser.getTok().getIdentifier();
1932
1933 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1934 Lex();
1935 return false;
1936}
1937
1938bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1939 getParser().getStreamer().SwitchSection(
1940 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1941 return false;
1942}
1943
1944bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1945 getParser().getStreamer().SwitchSection(
1946 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1947 return false;
1948}
1949
Tom Stellard9760f032015-12-03 03:34:32 +00001950bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1951 getParser().getStreamer().SwitchSection(
1952 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1953 return false;
1954}
1955
Tom Stellard45bb48e2015-06-13 03:28:10 +00001956bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001957 StringRef IDVal = DirectiveID.getString();
1958
1959 if (IDVal == ".hsa_code_object_version")
1960 return ParseDirectiveHSACodeObjectVersion();
1961
1962 if (IDVal == ".hsa_code_object_isa")
1963 return ParseDirectiveHSACodeObjectISA();
1964
Sam Kolton69c8aa22016-12-19 11:43:15 +00001965 if (IDVal == ".amdgpu_runtime_metadata")
1966 return ParseDirectiveRuntimeMetadata();
1967
Tom Stellardff7416b2015-06-26 21:58:31 +00001968 if (IDVal == ".amd_kernel_code_t")
1969 return ParseDirectiveAMDKernelCodeT();
1970
Tom Stellardfcfaea42016-05-05 17:03:33 +00001971 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001972 return ParseSectionDirectiveHSAText();
1973
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001974 if (IDVal == ".amdgpu_hsa_kernel")
1975 return ParseDirectiveAMDGPUHsaKernel();
1976
Tom Stellard00f2f912015-12-02 19:47:57 +00001977 if (IDVal == ".amdgpu_hsa_module_global")
1978 return ParseDirectiveAMDGPUHsaModuleGlobal();
1979
1980 if (IDVal == ".amdgpu_hsa_program_global")
1981 return ParseDirectiveAMDGPUHsaProgramGlobal();
1982
1983 if (IDVal == ".hsadata_global_agent")
1984 return ParseSectionDirectiveHSADataGlobalAgent();
1985
1986 if (IDVal == ".hsadata_global_program")
1987 return ParseSectionDirectiveHSADataGlobalProgram();
1988
Tom Stellard9760f032015-12-03 03:34:32 +00001989 if (IDVal == ".hsarodata_readonly_agent")
1990 return ParseSectionDirectiveHSARodataReadonlyAgent();
1991
Tom Stellard45bb48e2015-06-13 03:28:10 +00001992 return true;
1993}
1994
Matt Arsenault68802d32015-11-05 03:11:27 +00001995bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1996 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001997 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001998 return true;
1999
Matt Arsenault3b159672015-12-01 20:31:08 +00002000 if (isSI()) {
2001 // No flat_scr
2002 switch (RegNo) {
2003 case AMDGPU::FLAT_SCR:
2004 case AMDGPU::FLAT_SCR_LO:
2005 case AMDGPU::FLAT_SCR_HI:
2006 return false;
2007 default:
2008 return true;
2009 }
2010 }
2011
Matt Arsenault68802d32015-11-05 03:11:27 +00002012 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2013 // SI/CI have.
2014 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2015 R.isValid(); ++R) {
2016 if (*R == RegNo)
2017 return false;
2018 }
2019
2020 return true;
2021}
2022
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Next, try a plain register or immediate operand.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treat the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to a bare token operand.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
2064
Sam Kolton05ef1c92016-06-03 10:27:37 +00002065StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2066 // Clear any forced encodings from the previous instruction.
2067 setForcedEncodingSize(0);
2068 setForcedDPP(false);
2069 setForcedSDWA(false);
2070
2071 if (Name.endswith("_e64")) {
2072 setForcedEncodingSize(64);
2073 return Name.substr(0, Name.size() - 4);
2074 } else if (Name.endswith("_e32")) {
2075 setForcedEncodingSize(32);
2076 return Name.substr(0, Name.size() - 4);
2077 } else if (Name.endswith("_dpp")) {
2078 setForcedDPP(true);
2079 return Name.substr(0, Name.size() - 4);
2080 } else if (Name.endswith("_sdwa")) {
2081 setForcedSDWA(true);
2082 return Name.substr(0, Name.size() - 5);
2083 }
2084 return Name;
2085}
2086
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic (with any _e32/_e64/_dpp/_sdwa suffix
  // stripped and recorded as a forced encoding).
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Skip the rest of the statement so the parser can recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      // Skip the rest of the statement so the parser can recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
2120
2121//===----------------------------------------------------------------------===//
2122// Utility functions
2123//===----------------------------------------------------------------------===//
2124
Alex Bradbury58eba092016-11-01 16:32:05 +00002125OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002126AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002127 switch(getLexer().getKind()) {
2128 default: return MatchOperand_NoMatch;
2129 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002130 StringRef Name = Parser.getTok().getString();
2131 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002132 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002133 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002134
2135 Parser.Lex();
2136 if (getLexer().isNot(AsmToken::Colon))
2137 return MatchOperand_ParseFail;
2138
2139 Parser.Lex();
2140 if (getLexer().isNot(AsmToken::Integer))
2141 return MatchOperand_ParseFail;
2142
2143 if (getParser().parseAbsoluteExpression(Int))
2144 return MatchOperand_ParseFail;
2145 break;
2146 }
2147 }
2148 return MatchOperand_Success;
2149}
2150
Alex Bradbury58eba092016-11-01 16:32:05 +00002151OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002152AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002153 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002154 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002155 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002156 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002157
Alex Bradbury58eba092016-11-01 16:32:05 +00002158 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002159 if (Res != MatchOperand_Success)
2160 return Res;
2161
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002162 if (ConvertResult && !ConvertResult(Value)) {
2163 return MatchOperand_ParseFail;
2164 }
2165
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002166 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002167 return MatchOperand_Success;
2168}
2169
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  // Parses a boolean flag operand: the bare token <Name> sets the bit and a
  // "no"-prefixed form clears it; absence of either yields the default 0.
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        // NOTE(review): this accepts any token starting with "no" and ending
        // with Name (e.g. "noxgds" for "gds"), not just "no" + Name exactly
        // -- confirm whether the looser match is intentional.
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
2201
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002202typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
2203
Sam Koltona74cd522016-03-18 15:35:51 +00002204void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
2205 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00002206 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002207 auto i = OptionalIdx.find(ImmT);
2208 if (i != OptionalIdx.end()) {
2209 unsigned Idx = i->second;
2210 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2211 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00002212 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002213 }
2214}
2215
Alex Bradbury58eba092016-11-01 16:32:05 +00002216OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002217AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002218 if (getLexer().isNot(AsmToken::Identifier)) {
2219 return MatchOperand_NoMatch;
2220 }
2221 StringRef Tok = Parser.getTok().getString();
2222 if (Tok != Prefix) {
2223 return MatchOperand_NoMatch;
2224 }
2225
2226 Parser.Lex();
2227 if (getLexer().isNot(AsmToken::Colon)) {
2228 return MatchOperand_ParseFail;
2229 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002230
Sam Kolton3025e7f2016-04-26 13:33:56 +00002231 Parser.Lex();
2232 if (getLexer().isNot(AsmToken::Identifier)) {
2233 return MatchOperand_ParseFail;
2234 }
2235
2236 Value = Parser.getTok().getString();
2237 return MatchOperand_Success;
2238}
2239
Tom Stellard45bb48e2015-06-13 03:28:10 +00002240//===----------------------------------------------------------------------===//
2241// ds
2242//===----------------------------------------------------------------------===//
2243
Tom Stellard45bb48e2015-06-13 03:28:10 +00002244void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2245 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002246 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002247
2248 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2249 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2250
2251 // Add the register arguments
2252 if (Op.isReg()) {
2253 Op.addRegOperands(Inst, 1);
2254 continue;
2255 }
2256
2257 // Handle optional arguments
2258 OptionalIdx[Op.getImmTy()] = i;
2259 }
2260
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002261 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2262 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002263 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002264
Tom Stellard45bb48e2015-06-13 03:28:10 +00002265 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2266}
2267
2268void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002269 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
2270 bool GDSOnly = false;
2271
2272 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2273 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2274
2275 // Add the register arguments
2276 if (Op.isReg()) {
2277 Op.addRegOperands(Inst, 1);
2278 continue;
2279 }
2280
2281 if (Op.isToken() && Op.getToken() == "gds") {
2282 GDSOnly = true;
2283 continue;
2284 }
2285
2286 // Handle optional arguments
2287 OptionalIdx[Op.getImmTy()] = i;
2288 }
2289
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002290 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2291 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002292
2293 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002294 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002295 }
2296 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2297}
2298
/// Build the MCInst for EXP (export) instructions.
/// Tracks which of the four source slots carry a live register and emits that
/// enable mask (EnMask) as the final operand; "off" slots get NoRegister but
/// still advance the slot index so the mask bits line up.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments; each live source sets its EnMask bit.
    if (Op.isReg()) {
      EnMask |= (1 << SrcIdx);
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" occupies a source slot without enabling it.
    if (Op.isOff()) {
      ++SrcIdx;
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      continue;
    }

    // The export target immediate is positional, not optional.
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is encoded elsewhere in the asm string; no MCInst operand here.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002339
2340//===----------------------------------------------------------------------===//
2341// s_waitcnt
2342//===----------------------------------------------------------------------===//
2343
2344bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
2345 StringRef CntName = Parser.getTok().getString();
2346 int64_t CntVal;
2347
2348 Parser.Lex();
2349 if (getLexer().isNot(AsmToken::LParen))
2350 return true;
2351
2352 Parser.Lex();
2353 if (getLexer().isNot(AsmToken::Integer))
2354 return true;
2355
2356 if (getParser().parseAbsoluteExpression(CntVal))
2357 return true;
2358
2359 if (getLexer().isNot(AsmToken::RParen))
2360 return true;
2361
2362 Parser.Lex();
2363 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
2364 Parser.Lex();
2365
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +00002366 IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002367 if (CntName == "vmcnt")
2368 IntVal = encodeVmcnt(IV, IntVal, CntVal);
2369 else if (CntName == "expcnt")
2370 IntVal = encodeExpcnt(IV, IntVal, CntVal);
2371 else if (CntName == "lgkmcnt")
2372 IntVal = encodeLgkmcnt(IV, IntVal, CntVal);
2373 else
Tom Stellard45bb48e2015-06-13 03:28:10 +00002374 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002375
Tom Stellard45bb48e2015-06-13 03:28:10 +00002376 return false;
2377}
2378
Alex Bradbury58eba092016-11-01 16:32:05 +00002379OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002380AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002381 IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
2382 int64_t Waitcnt = getWaitcntBitMask(IV);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002383 SMLoc S = Parser.getTok().getLoc();
2384
2385 switch(getLexer().getKind()) {
2386 default: return MatchOperand_ParseFail;
2387 case AsmToken::Integer:
2388 // The operand can be an integer value.
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002389 if (getParser().parseAbsoluteExpression(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00002390 return MatchOperand_ParseFail;
2391 break;
2392
2393 case AsmToken::Identifier:
2394 do {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002395 if (parseCnt(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00002396 return MatchOperand_ParseFail;
2397 } while(getLexer().isNot(AsmToken::EndOfStatement));
2398 break;
2399 }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002400 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002401 return MatchOperand_Success;
2402}
2403
/// Parse the "hwreg(<id> [, <offset>, <width>])" construct used by
/// s_getreg/s_setreg. The register id may be a symbolic name or an integer;
/// offset and width are optional and keep their caller-supplied defaults when
/// omitted. Returns true on parse failure, false on success. An unknown
/// symbolic id is NOT an error here: HwReg.Id stays ID_UNKNOWN_ and the
/// caller diagnoses it.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: linear search of the known-name table.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: hwreg(<id>) with no offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2464
/// Parse the hwreg operand of s_getreg/s_setreg: either a raw 16-bit
/// immediate or the hwreg(...) construct. Range errors are reported but an
/// immediate operand is still created so parsing can continue without
/// cascading diagnostics.
OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        // Validate each field; diagnostics do not abort the parse (see above).
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Pack id/offset/width-1 into the 16-bit simm16 encoding.
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2508
// Any immediate is acceptable as an s_waitcnt operand; range checking is
// done during parsing (see parseSWaitCntOps).
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
2512
// True only for immediates created by parseHwreg (tagged ImmTyHwreg).
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
2516
/// Parse the "sendmsg(<msg> [, <op> [, <stream>]])" construct of s_sendmsg.
/// Message and operation may be symbolic names or integers; the stream id is
/// optional for GS ops. Returns true on parse failure, false on success.
/// Unknown symbolic names leave the respective Id at *_UNKNOWN_ for the
/// caller to diagnose; an unknown numeric message id ends parsing early
/// (returns false) since the rest cannot be interpreted.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name; the id table has gaps that must be skipped.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second integer immediately following the message id is
    // re-parsed into Msg.Id and treated as unknown; presumably this tolerates
    // malformed input like "sendmsg(1 2)" — confirm against the tests.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages without an operation field must close immediately.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation name; the table depends on the message family.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2612
Matt Arsenault0e8a2992016-12-15 20:40:20 +00002613OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
2614 if (getLexer().getKind() != AsmToken::Identifier)
2615 return MatchOperand_NoMatch;
2616
2617 StringRef Str = Parser.getTok().getString();
2618 int Slot = StringSwitch<int>(Str)
2619 .Case("p10", 0)
2620 .Case("p20", 1)
2621 .Case("p0", 2)
2622 .Default(-1);
2623
2624 SMLoc S = Parser.getTok().getLoc();
2625 if (Slot == -1)
2626 return MatchOperand_ParseFail;
2627
2628 Parser.Lex();
2629 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
2630 AMDGPUOperand::ImmTyInterpSlot));
2631 return MatchOperand_Success;
2632}
2633
/// Parse the "attrN.<chan>" operand of v_interp_* instructions, producing two
/// immediates: the attribute number (0-63) and the channel (.x/.y/.z/.w as
/// 0-3). Out-of-range attribute numbers are diagnosed but still consumed.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the final ".x"/".y"/".z"/".w" suffix.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the channel suffix and the "attr" prefix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
2673
// Report an out-of-range/invalid export target at the current token.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
2677
2678OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
2679 uint8_t &Val) {
2680 if (Str == "null") {
2681 Val = 9;
2682 return MatchOperand_Success;
2683 }
2684
2685 if (Str.startswith("mrt")) {
2686 Str = Str.drop_front(3);
2687 if (Str == "z") { // == mrtz
2688 Val = 8;
2689 return MatchOperand_Success;
2690 }
2691
2692 if (Str.getAsInteger(10, Val))
2693 return MatchOperand_ParseFail;
2694
2695 if (Val > 7)
2696 errorExpTgt();
2697
2698 return MatchOperand_Success;
2699 }
2700
2701 if (Str.startswith("pos")) {
2702 Str = Str.drop_front(3);
2703 if (Str.getAsInteger(10, Val))
2704 return MatchOperand_ParseFail;
2705
2706 if (Val > 3)
2707 errorExpTgt();
2708
2709 Val += 12;
2710 return MatchOperand_Success;
2711 }
2712
2713 if (Str.startswith("param")) {
2714 Str = Str.drop_front(5);
2715 if (Str.getAsInteger(10, Val))
2716 return MatchOperand_ParseFail;
2717
2718 if (Val >= 32)
2719 errorExpTgt();
2720
2721 Val += 32;
2722 return MatchOperand_Success;
2723 }
2724
2725 if (Str.startswith("invalid_target_")) {
2726 Str = Str.drop_front(15);
2727 if (Str.getAsInteger(10, Val))
2728 return MatchOperand_ParseFail;
2729
2730 errorExpTgt();
2731 return MatchOperand_Success;
2732 }
2733
2734 return MatchOperand_NoMatch;
2735}
2736
2737OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
2738 uint8_t Val;
2739 StringRef Str = Parser.getTok().getString();
2740
2741 auto Res = parseExpTgtImpl(Str, Val);
2742 if (Res != MatchOperand_Success)
2743 return Res;
2744
2745 SMLoc S = Parser.getTok().getLoc();
2746 Parser.Lex();
2747
2748 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
2749 AMDGPUOperand::ImmTyExpTgt));
2750 return MatchOperand_Success;
2751}
2752
/// Parse the s_sendmsg operand: either a raw 16-bit immediate or the
/// sendmsg(...) construct. Validation errors are reported but an immediate
/// operand is still created so parsing can continue; the break-out of the
/// do/while(false) skips the remaining encoding steps after a diagnostic.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
            || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID (GS ops other than NOP only).
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2831
// True only for immediates created by parseSendMsgOp (tagged ImmTySendMsg).
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2835
Tom Stellard45bb48e2015-06-13 03:28:10 +00002836//===----------------------------------------------------------------------===//
2837// sopp branch targets
2838//===----------------------------------------------------------------------===//
2839
Alex Bradbury58eba092016-11-01 16:32:05 +00002840OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002841AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2842 SMLoc S = Parser.getTok().getLoc();
2843
2844 switch (getLexer().getKind()) {
2845 default: return MatchOperand_ParseFail;
2846 case AsmToken::Integer: {
2847 int64_t Imm;
2848 if (getParser().parseAbsoluteExpression(Imm))
2849 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002850 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002851 return MatchOperand_Success;
2852 }
2853
2854 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002855 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00002856 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2857 Parser.getTok().getString()), getContext()), S));
2858 Parser.Lex();
2859 return MatchOperand_Success;
2860 }
2861}
2862
2863//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002864// mubuf
2865//===----------------------------------------------------------------------===//
2866
// Default (0) immediates for optional MUBUF modifiers omitted in the source.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
2878
/// Shared MCInst builder for MUBUF loads/stores and atomics. For atomics the
/// glc modifier is part of the mnemonic and therefore not an operand; for
/// atomics-with-return the $vdata_in operand is duplicated as $vdata.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                            const OperandVector &Operands,
                            bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // A return-variant atomic is by definition an atomic.
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2924
2925//===----------------------------------------------------------------------===//
2926// mimg
2927//===----------------------------------------------------------------------===//
2928
Sam Kolton1bdcef72016-05-23 09:59:02 +00002929void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2930 unsigned I = 1;
2931 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2932 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2933 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2934 }
2935
2936 OptionalImmIndexMap OptionalIdx;
2937
2938 for (unsigned E = Operands.size(); I != E; ++I) {
2939 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2940
2941 // Add the register arguments
2942 if (Op.isRegOrImm()) {
2943 Op.addRegOrImmOperands(Inst, 1);
2944 continue;
2945 } else if (Op.isImmModifier()) {
2946 OptionalIdx[Op.getImmTy()] = I;
2947 } else {
Matt Arsenault92b355b2016-11-15 19:34:37 +00002948 llvm_unreachable("unexpected operand type");
Sam Kolton1bdcef72016-05-23 09:59:02 +00002949 }
2950 }
2951
2952 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2953 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2954 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2955 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2956 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2957 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2958 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2959 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2960}
2961
/// Build the MCInst for MIMG atomic instructions. Identical to cvtMIMG
/// except the first source is tied to the destination, so the def register
/// is re-added as the src operand.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst (tied operand; note I is NOT advanced here).
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  // Optional modifiers in their fixed encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2997
// Default (0) immediates for optional MIMG modifiers omitted in the source.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
3017
Tom Stellard45bb48e2015-06-13 03:28:10 +00003018//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00003019// smrd
3020//===----------------------------------------------------------------------===//
3021
Artem Tamazov54bfd542016-10-31 16:07:39 +00003022bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00003023 return isImm() && isUInt<8>(getImm());
3024}
3025
Artem Tamazov54bfd542016-10-31 16:07:39 +00003026bool AMDGPUOperand::isSMRDOffset20() const {
3027 return isImm() && isUInt<20>(getImm());
3028}
3029
Tom Stellard217361c2015-08-06 19:28:38 +00003030bool AMDGPUOperand::isSMRDLiteralOffset() const {
3031 // 32-bit literals are only supported on CI and we only want to use them
3032 // when the offset is > 8-bits.
3033 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
3034}
3035
Artem Tamazov54bfd542016-10-31 16:07:39 +00003036AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
3037 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
3038}
3039
3040AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003041 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00003042}
3043
3044AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003045 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00003046}
3047
Tom Stellard217361c2015-08-06 19:28:38 +00003048//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003049// vop3
3050//===----------------------------------------------------------------------===//
3051
// Fold an "omod" multiplier into its encoded form: mul:1 -> 0, mul:2 -> 1,
// mul:4 -> 2. Any other multiplier is rejected (returns false) and the
// value is left untouched.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
3059
// Fold an "omod" divisor into its encoded form: div:1 -> 0, div:2 -> 3.
// Any other divisor is rejected (returns false) and the value is left
// untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
3073
// Fold a parsed bound_ctrl value into its encoded form: 0 -> 1, -1 -> 0.
// Any other value is rejected (returns false) and the value is left
// untouched.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
3087
// Note: the order in this table matches the order of operands in AsmString.
// Fields per row: {assembly name, immediate operand type, IsBit, optional
// ConvertResult callback}. Rows with IsBit == true are bare flag operands
// parsed via parseNamedBit(); all others take a value, parsed via a
// type-specific parser or parseIntWithPrefix() (see parseOptionalOperand()).
// The ConvertResult callback, when non-null, rewrites the parsed integer
// into its encoded form and rejects out-of-range values.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00003116
Alex Bradbury58eba092016-11-01 16:32:05 +00003117OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00003118 OperandMatchResultTy res;
3119 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
3120 // try to parse any optional operand here
3121 if (Op.IsBit) {
3122 res = parseNamedBit(Op.Name, Operands, Op.Type);
3123 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
3124 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00003125 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
3126 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
3127 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
3128 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00003129 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
3130 res = parseSDWADstUnused(Operands);
3131 } else {
3132 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
3133 }
3134 if (res != MatchOperand_NoMatch) {
3135 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003136 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003137 }
3138 return MatchOperand_NoMatch;
3139}
3140
Matt Arsenault12c53892016-11-15 19:58:54 +00003141OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003142 StringRef Name = Parser.getTok().getString();
3143 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00003144 return parseIntWithPrefix("mul", Operands,
3145 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003146 }
Matt Arsenault12c53892016-11-15 19:58:54 +00003147
3148 if (Name == "div") {
3149 return parseIntWithPrefix("div", Operands,
3150 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
3151 }
3152
3153 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003154}
3155
Tom Stellarda90b9522016-02-11 03:28:15 +00003156void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
3157 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00003158 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00003159 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003160 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3161 }
3162 for (unsigned E = Operands.size(); I != E; ++I)
3163 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
3164}
3165
3166void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003167 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
3168 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003169 cvtVOP3(Inst, Operands);
3170 } else {
3171 cvtId(Inst, Operands);
3172 }
3173}
3174
Sam Koltona3ec5c12016-10-07 14:46:06 +00003175static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
3176 // 1. This operand is input modifiers
3177 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
3178 // 2. This is not last operand
3179 && Desc.NumOperands > (OpNum + 1)
3180 // 3. Next operand is register class
3181 && Desc.OpInfo[OpNum + 1].RegClass != -1
3182 // 4. Next register is not tied to any other operand
3183 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
3184}
3185
// Convert parsed operands into a VOP3-encoded MCInst: destination registers
// first, then sources (two MCInst operands when input modifiers are
// present), then the optional clamp/omod immediates in canonical order.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1; // start past the instruction name at Operands[0]
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Destination register(s) come first.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with FP input modifiers occupies two MCInst operands.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Remember where each optional immediate appeared; they are emitted
      // in canonical order below regardless of parse order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Insert at the src2_modifiers position of the generic (pre-subtarget)
    // opcode, then the tied src2 register right after it.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                     AMDGPU::V_MAC_F16_e64 :
                                     AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
3227
Sam Koltondfa29f72016-03-09 12:29:31 +00003228//===----------------------------------------------------------------------===//
3229// dpp
3230//===----------------------------------------------------------------------===//
3231
3232bool AMDGPUOperand::isDPPCtrl() const {
3233 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
3234 if (result) {
3235 int64_t Imm = getImm();
3236 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
3237 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
3238 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
3239 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
3240 (Imm == 0x130) ||
3241 (Imm == 0x134) ||
3242 (Imm == 0x138) ||
3243 (Imm == 0x13c) ||
3244 (Imm == 0x140) ||
3245 (Imm == 0x141) ||
3246 (Imm == 0x142) ||
3247 (Imm == 0x143);
3248 }
3249 return false;
3250}
3251
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00003252bool AMDGPUOperand::isGPRIdxMode() const {
3253 return isImm() && isUInt<4>(getImm());
3254}
3255
// Parse a dpp_ctrl operand and fold it into a single immediate encoding:
//   row_mirror                  -> 0x140
//   row_half_mirror             -> 0x141
//   quad_perm:[a,b,c,d]         -> a | b<<2 | c<<4 | d<<6   (each 0..3)
//   row_shl/row_shr/row_ror:N   -> 0x100/0x110/0x120 | N    (N in 1..15)
//   wave_shl/rol/shr/ror:1      -> 0x130/0x134/0x138/0x13C
//   row_bcast:15 / row_bcast:31 -> 0x142 / 0x143
// Returns NoMatch when the current token is not a DPP control name, and
// ParseFail once a known prefix has been consumed but its payload is bad.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy successive 2-bit fields.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Combine the prefix with the (range-checked) operand value.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
3355
Sam Kolton5f10a132016-05-06 11:31:17 +00003356AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003357 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003358}
3359
Sam Kolton5f10a132016-05-06 11:31:17 +00003360AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003361 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003362}
3363
Sam Kolton5f10a132016-05-06 11:31:17 +00003364AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003365 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00003366}
3367
3368void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00003369 OptionalImmIndexMap OptionalIdx;
3370
3371 unsigned I = 1;
3372 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
3373 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
3374 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3375 }
3376
3377 for (unsigned E = Operands.size(); I != E; ++I) {
3378 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
3379 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00003380 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
3381 // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token.
3382 // Skip it.
3383 continue;
3384 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton945231a2016-06-10 09:57:59 +00003385 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00003386 } else if (Op.isDPPCtrl()) {
3387 Op.addImmOperands(Inst, 1);
3388 } else if (Op.isImm()) {
3389 // Handle optional arguments
3390 OptionalIdx[Op.getImmTy()] = I;
3391 } else {
3392 llvm_unreachable("Invalid operand type");
3393 }
3394 }
3395
Sam Koltondfa29f72016-03-09 12:29:31 +00003396 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
3397 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
3398 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Koltona3ec5c12016-10-07 14:46:06 +00003399
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003400 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00003401 // it has src2 register operand that is tied to dst operand
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003402 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
3403 Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003404 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003405 std::advance(
3406 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00003407 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
3408 }
Sam Koltondfa29f72016-03-09 12:29:31 +00003409}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00003410
Sam Kolton3025e7f2016-04-26 13:33:56 +00003411//===----------------------------------------------------------------------===//
3412// sdwa
3413//===----------------------------------------------------------------------===//
3414
Alex Bradbury58eba092016-11-01 16:32:05 +00003415OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003416AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
3417 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003418 using namespace llvm::AMDGPU::SDWA;
3419
Sam Kolton3025e7f2016-04-26 13:33:56 +00003420 SMLoc S = Parser.getTok().getLoc();
3421 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003422 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003423
Sam Kolton05ef1c92016-06-03 10:27:37 +00003424 res = parseStringWithPrefix(Prefix, Value);
3425 if (res != MatchOperand_Success) {
3426 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003427 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003428
Sam Kolton3025e7f2016-04-26 13:33:56 +00003429 int64_t Int;
3430 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003431 .Case("BYTE_0", SdwaSel::BYTE_0)
3432 .Case("BYTE_1", SdwaSel::BYTE_1)
3433 .Case("BYTE_2", SdwaSel::BYTE_2)
3434 .Case("BYTE_3", SdwaSel::BYTE_3)
3435 .Case("WORD_0", SdwaSel::WORD_0)
3436 .Case("WORD_1", SdwaSel::WORD_1)
3437 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003438 .Default(0xffffffff);
3439 Parser.Lex(); // eat last token
3440
3441 if (Int == 0xffffffff) {
3442 return MatchOperand_ParseFail;
3443 }
3444
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003445 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003446 return MatchOperand_Success;
3447}
3448
Alex Bradbury58eba092016-11-01 16:32:05 +00003449OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00003450AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003451 using namespace llvm::AMDGPU::SDWA;
3452
Sam Kolton3025e7f2016-04-26 13:33:56 +00003453 SMLoc S = Parser.getTok().getLoc();
3454 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003455 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003456
3457 res = parseStringWithPrefix("dst_unused", Value);
3458 if (res != MatchOperand_Success) {
3459 return res;
3460 }
3461
3462 int64_t Int;
3463 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003464 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
3465 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
3466 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003467 .Default(0xffffffff);
3468 Parser.Lex(); // eat last token
3469
3470 if (Int == 0xffffffff) {
3471 return MatchOperand_ParseFail;
3472 }
3473
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003474 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003475 return MatchOperand_Success;
3476}
3477
// Per-encoding entry points that forward to cvtSDWA() with the basic
// instruction family of the SDWA form being converted.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}

void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}

void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
3489
// Convert parsed operands into an SDWA-encoded MCInst. BasicInstType is the
// underlying encoding family (VOP1/VOP2/VOPC) and selects which optional
// sel/unused immediates are appended, each with its default when omitted
// (dst_sel/src0_sel/src1_sel default 6, dst_unused default 2, clamp 0).
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1; // start past the instruction name at Operands[0]
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if ((BasicInstType == SIInstrFlags::VOPC ||
         BasicInstType == SIInstrFlags::VOP2)&&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC and VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with input modifiers occupies two MCInst operands.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // V_NOP_sdwa_vi has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;

    case SIInstrFlags::VOPC:
      // VOPC writes vcc, so it has no dst_sel/dst_unused operands.
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003559
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this asm parser with both AMDGPU target flavors.
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
3565
3566#define GET_REGISTER_MATCHER
3567#define GET_MATCHER_IMPLEMENTATION
3568#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003569
// This function should be defined after the auto-generated include so that we
// have the MatchClassKind enum defined.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
                                                     unsigned Kind) {
  // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
  // But MatchInstructionImpl() expects to meet token and fails to validate
  // operand. This method checks if we are given immediate operand but expect to
  // get corresponding token.
  AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
  switch (Kind) {
  case MCK_addr64:
    return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
  case MCK_gds:
    return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
  case MCK_glc:
    return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
  case MCK_idxen:
    return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
  case MCK_offen:
    return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcB32:
    // When operands have expression values, they will return true for isToken,
    // because it is not possible to distinguish between a token and an
    // expression at parse time. MatchInstructionImpl() will always try to
    // match an operand as a token, when isToken returns true, and when the
    // name of the expression is not a valid token, the match will fail,
    // so we need to handle it here.
    return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcF32:
    return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
  case MCK_SoppBrTarget:
    return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
  case MCK_VReg32OrOff:
    return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
  case MCK_InterpSlot:
    return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
  case MCK_Attr:
    return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
  case MCK_AttrChan:
    return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
  default:
    return Match_InvalidOperand;
  }
}