//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These methods form the public interface of MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The remaining sub-parsers have more freedom over their interfaces: they
  // return an OperandMatchResultTy because it is less ambiguous than
  // true/false or -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool validateInstruction(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
};

} // end anonymous namespace

namespace {

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher operates on
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  union {
    struct {
      const MCExpr *Val;
      unsigned ShiftAmount;
      bool ImplicitAmount;
    } ImmWithLSL;

    struct {
      A64CC::CondCodes Code;
    } CondCode;

    struct {
      double Val;
    } FPImm;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned RegNum;
    } Reg;

    struct {
      A64SE::ShiftExtSpecifiers ShiftType;
      unsigned Amount;
      bool ImplicitAmount;
    } ShiftExtend;

    struct {
      const char *Data;
      unsigned Length;
    } SysReg;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
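
  // Illustrative examples (not from the original source): "add x0, x1, #123"
  // and "add x0, x1, #:lo12:sym" satisfy isAddSubImmLSL0, while
  // "add x0, x1, #1, lsl #12" or "add x0, x1, #:tprel_hi12:var, lsl #12"
  // satisfy isAddSubImmLSL12.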

  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }
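
  // For illustration (assumed example, not from the original source): with
  // MemSize == 4 and RmSize == 32, "ldr w0, [x1, w2, sxtw #2]" passes this
  // check because the shift equals log2 of the access size; a shift of #0
  // is also accepted.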

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOT
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; users needing that behaviour have to express the operand
    // in another form.
    return false;
  }
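
  // Worked example (illustrative): isLabel<19, 4> accepts multiples of 4 in
  // [-(4 << 18), 4 * ((1 << 18) - 1)], i.e. -1 MiB to 1 MiB - 4, matching a
  // 19-bit signed, word-scaled branch or load-literal offset.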

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be a 12-bit unsigned quantity after scaling.
    return Val >= 0 && Val <= 0xfff * MemSize;
  }
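
  // E.g. (illustrative): with MemSize == 8, byte offsets 0, 8, ..., 32760
  // are accepted ("ldr x0, [x1, #32760]"), while #4 fails the alignment
  // check and #32768 exceeds 0xfff * 8.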

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMoveWideImm(unsigned RegWidth,
                     AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }
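
  // Illustrative examples (not from the original source):
  // "movz x0, #0xbeef, lsl #16" is a constant move-wide immediate, while
  // "movz x0, #:abs_g1:sym" carries a modifier from the permitted list and
  // must leave the shift implicit, since the modifier itself selects the
  // 16-bit slice.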

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check the name against the registers known to be readable via MRS.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  template<int MemSize> bool isSImm7Scaled() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
        && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    // Write through the SysReg member to match the k_SysReg kind (the Tok
    // member used previously is layout-identical, but this is clearer).
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated.
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5 bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // The low bit of Option is set by the instruction classes; the high two
    // bits are derived as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }
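
  // Summary of the encoding above (illustrative): the emitted immediate is
  // Option<2:1>:S, i.e. 01:S for UXTW/LSL and 11:S for SXTW/SXTX. S is set
  // when the offset register is explicitly scaled (or, for byte accesses,
  // when an explicit #0 amount was written); Option<0> comes from the
  // instruction class.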

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing.
  }
  // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is the current location
  case AsmToken::Dollar:  // $ is the PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {  // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases.
  case AsmToken::LCurly: // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix.
  return getParser().ParseExpression(ExprVal)
             ? MatchOperand_ParseFail : MatchOperand_Success;
}
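
// E.g. (illustrative): "sym + 4" parses as a plain MCExpr, while
// ":lo12:sym + 4" is wrapped in an AArch64MCExpr recording VK_AARCH64_LO12
// around the sub-expression.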

// A lane attached to a NEON register: "[N]", which should be lexed as three
// tokens: '[', N and ']'. A '#' is not allowed to precede the lane number
// here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}
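
// For example (illustrative): in "add x0, x0, #:lo12:var", ParseImmediate
// sees the leading ':' and this routine consumes ":lo12:", leaving "var" to
// be parsed as the sub-expression.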

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().lower() == "lsl") {
    Parser.Lex();

    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex();
  }

  // Whatever optional prefixes were present, an integer shift amount is
  // mandatory at this point; getIntVal() must not be called on anything else.
  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
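
// E.g. (illustrative): "#42" yields an implicit LSL #0, while
// "#0x12, lsl #16" records an explicit 16-bit shift for the move-wide
// predicates above.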

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  std::string LowerTok = Parser.getTok().getIdentifier().lower();
  StringRef Tok(LowerTok);
  if (Tok[0] != 'c') {
    Error(S, "expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}
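
// E.g. (illustrative): a system instruction such as "sys #0, c7, c5, #0, x0"
// carries two CRx operands; "c7" and "c5" each become MCConstantExpr
// immediates 7 and 5 here.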

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}
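
// E.g. (illustrative): "fmov d0, #-1.5" reaches this routine as '#', '-' and
// the real token "1.5"; the sign is folded into the APFloat before the
// operand is created.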

// Automatically generated by TableGen.
static unsigned MatchRegisterName(StringRef Name);

bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc. for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}
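
// E.g. (illustrative): "fp" resolves to AArch64::X29 via the alias table
// above, while "v1.4s" is split at the dot into the register name "v1" and
// the validated layout string ".4s".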

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If
  // relaxing this, beware of unintended consequences: an immediate will be
  // accepted during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it.

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
     || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
     || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid.

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}
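
// E.g. (illustrative): for load/store exclusive forms such as
// "ldxr x0, [x1]" or "ldxr x0, [x1, #0]", the base register is wrapped so
// the matcher can treat both spellings identically.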

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
    StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
      .Case("lsl", A64SE::LSL)
      .Case("lsr", A64SE::LSR)
      .Case("asr", A64SE::ASR)
      .Case("ror", A64SE::ROR)
      .Case("uxtb", A64SE::UXTB)
      .Case("uxth", A64SE::UXTH)
      .Case("uxtw", A64SE::UXTW)
      .Case("uxtx", A64SE::UXTX)
      .Case("sxtb", A64SE::SXTB)
      .Case("sxth", A64SE::SXTH)
      .Case("sxtw", A64SE::SXTW)
      .Case("sxtx", A64SE::SXTX)
      .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
      Spec != A64SE::ASR && Spec != A64SE::ROR) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      E = Parser.getTok().getLoc();
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}

// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();

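    // By this point an insert alias like "bfi w0, w1, #lsb, #width" has been
    // encoded as BFM immediates: ImmR == (RegWidth - lsb) % RegWidth and
    // ImmS == width - 1. For nonzero lsb, ImmS >= ImmR below is therefore
    // exactly the condition lsb + width > RegWidth. E.g. (illustrative)
    // "bfi w0, w1, #29, #4" gives ImmR == 3, ImmS == 3: the field would spill
    // past bit 31, so it is rejected.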
    if (ImmR == 0) {
      // Bitfield inserts are the preferred disassembly if ImmS < ImmR.
      // However, this is the one case where the insert form is valid syntax
      // even though the bfx disassembly should be used: e.g.
      // "sbfiz w0, w0, #0, #1".
      return false;
    } else if (ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

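    // The extract aliases encode "sbfx Rd, Rn, #lsb, #width" as ImmR == lsb
    // and ImmS == lsb + width - 1, so the check below rejects fields that run
    // past the top bit of the register. E.g. (illustrative)
    // "ubfx w0, w1, #30, #3" gives ImmS == 32 and is rejected.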
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
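    // e.g. (illustrative) "ic ivau, x0" names an op that takes an address
    // register, while "ic ialluis" must appear bare; ICix is the
    // register-taking form, so reject ops that don't use one.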
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

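  // A conditional mnemonic such as "b.eq" is split at the '.', e.g.
  // (illustrative) "b.eq lbl" becomes the token "b", a "." token, a
  // condition-code operand (EQ) and then the label operand.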
  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.EatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both are
      // due to memory specifiers:
      //   + An RBrac will end an address for load/store/prefetch
      //   + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
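      // e.g. (illustrative) "ldr x0, [x1, #8]!" ends the address with a "]"
      // token after the offset operand and then appends a "!" token to mark
      // the pre-indexed write-back.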
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
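/// e.g. (illustrative) ".word 1, 2, 3" emits three 4-byte values; ".hword"
/// and ".xword" reach here with Size 2 and 8 respectively.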
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// ParseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
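// The directive marks the position of a TLS descriptor call for relocation
// purposes, e.g. (illustrative):
//   .tlsdesccall var
//   blr x1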
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().ParseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);

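  // On failure, ErrorInfo is either ~0U (no particular operand to blame) or
  // the index of the offending operand; an index past the end of the operand
  // list means too few operands were supplied to match anything.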
  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"