blob: c1695dacb43a6ad62ea18a62019760b0c364235e [file] [log] [blame]
//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
38
39namespace {
40
41class AArch64Operand;
42
43class AArch64AsmParser : public MCTargetAsmParser {
44 MCSubtargetInfo &STI;
45 MCAsmParser &Parser;
46
47#define GET_ASSEMBLER_HEADER
48#include "AArch64GenAsmMatcher.inc"
49
50public:
Tim Northover60baeb92013-02-11 09:29:37 +000051 enum AArch64MatchResultTy {
52 Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
53#define GET_OPERAND_DIAGNOSTIC_TYPES
54#include "AArch64GenAsmMatcher.inc"
55 };
56
Tim Northovere0e3aef2013-01-31 12:12:40 +000057 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
58 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
59 MCAsmParserExtension::Initialize(_Parser);
60
61 // Initialize the set of available features.
62 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
63 }
64
65 // These are the public interface of the MCTargetAsmParser
66 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
Tim Northoverbcaca872013-02-05 13:24:56 +000067 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Tim Northovere0e3aef2013-01-31 12:12:40 +000068 SMLoc NameLoc,
69 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
70
71 bool ParseDirective(AsmToken DirectiveID);
72 bool ParseDirectiveTLSDescCall(SMLoc L);
73 bool ParseDirectiveWord(unsigned Size, SMLoc L);
74
75 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
76 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
77 MCStreamer&Out, unsigned &ErrorInfo,
78 bool MatchingInlineAsm);
79
80 // The rest of the sub-parsers have more freedom over interface: they return
81 // an OperandMatchResultTy because it's less ambiguous than true/false or
82 // -1/0/1 even if it is more verbose
83 OperandMatchResultTy
84 ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
85 StringRef Mnemonic);
86
87 OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
88
89 OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
90
91 OperandMatchResultTy
92 ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
93 uint32_t NumLanes);
94
95 OperandMatchResultTy
96 ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
97 uint32_t &NumLanes);
98
99 OperandMatchResultTy
100 ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
101
102 OperandMatchResultTy
103 ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
104
105 OperandMatchResultTy
106 ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
107
108 OperandMatchResultTy
109 ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
110
111 template<typename SomeNamedImmMapper> OperandMatchResultTy
112 ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
113 return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
114 }
115
116 OperandMatchResultTy
117 ParseNamedImmOperand(const NamedImmMapper &Mapper,
118 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
119
120 OperandMatchResultTy
121 ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
122
123 OperandMatchResultTy
124 ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
125
126 OperandMatchResultTy
127 ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
128
129 bool validateInstruction(MCInst &Inst,
Tim Northoverbcaca872013-02-05 13:24:56 +0000130 const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
Tim Northovere0e3aef2013-01-31 12:12:40 +0000131
132 /// Scan the next token (which had better be an identifier) and determine
133 /// whether it represents a general-purpose or vector register. It returns
134 /// true if an identifier was found and populates its reference arguments. It
135 /// does not consume the token.
136 bool
137 IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
138 SMLoc &LayoutLoc) const;
139
140};
141
142}
143
144namespace {
145
146/// Instances of this class represent a parsed AArch64 machine instruction.
147class AArch64Operand : public MCParsedAsmOperand {
148private:
149 enum KindTy {
150 k_ImmWithLSL, // #uimm {, LSL #amt }
151 k_CondCode, // eq/ne/...
152 k_FPImmediate, // Limited-precision floating-point imm
153 k_Immediate, // Including expressions referencing symbols
154 k_Register,
155 k_ShiftExtend,
156 k_SysReg, // The register operand of MRS and MSR instructions
157 k_Token, // The mnemonic; other raw tokens the auto-generated
158 k_WrappedRegister // Load/store exclusive permit a wrapped register.
159 } Kind;
160
161 SMLoc StartLoc, EndLoc;
162
163 union {
164 struct {
165 const MCExpr *Val;
166 unsigned ShiftAmount;
167 bool ImplicitAmount;
168 } ImmWithLSL;
169
170 struct {
171 A64CC::CondCodes Code;
172 } CondCode;
173
174 struct {
175 double Val;
176 } FPImm;
177
178 struct {
179 const MCExpr *Val;
180 } Imm;
181
182 struct {
183 unsigned RegNum;
184 } Reg;
185
186 struct {
187 A64SE::ShiftExtSpecifiers ShiftType;
188 unsigned Amount;
189 bool ImplicitAmount;
190 } ShiftExtend;
191
192 struct {
193 const char *Data;
194 unsigned Length;
195 } SysReg;
196
197 struct {
198 const char *Data;
199 unsigned Length;
200 } Tok;
201 };
202
203 AArch64Operand(KindTy K, SMLoc S, SMLoc E)
204 : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
205
206public:
207 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
208 }
209
210 SMLoc getStartLoc() const { return StartLoc; }
211 SMLoc getEndLoc() const { return EndLoc; }
212 void print(raw_ostream&) const;
213 void dump() const;
214
215 StringRef getToken() const {
216 assert(Kind == k_Token && "Invalid access!");
217 return StringRef(Tok.Data, Tok.Length);
218 }
219
220 unsigned getReg() const {
221 assert((Kind == k_Register || Kind == k_WrappedRegister)
222 && "Invalid access!");
223 return Reg.RegNum;
224 }
225
226 const MCExpr *getImm() const {
227 assert(Kind == k_Immediate && "Invalid access!");
228 return Imm.Val;
229 }
230
231 A64CC::CondCodes getCondCode() const {
232 assert(Kind == k_CondCode && "Invalid access!");
233 return CondCode.Code;
234 }
235
236 static bool isNonConstantExpr(const MCExpr *E,
237 AArch64MCExpr::VariantKind &Variant) {
238 if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
239 Variant = A64E->getKind();
240 return true;
241 } else if (!isa<MCConstantExpr>(E)) {
242 Variant = AArch64MCExpr::VK_AARCH64_None;
243 return true;
244 }
245
246 return false;
247 }
248
249 bool isCondCode() const { return Kind == k_CondCode; }
250 bool isToken() const { return Kind == k_Token; }
251 bool isReg() const { return Kind == k_Register; }
252 bool isImm() const { return Kind == k_Immediate; }
253 bool isMem() const { return false; }
254 bool isFPImm() const { return Kind == k_FPImmediate; }
255 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
256 bool isSysReg() const { return Kind == k_SysReg; }
257 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
258 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
259
260 bool isAddSubImmLSL0() const {
261 if (!isImmWithLSL()) return false;
262 if (ImmWithLSL.ShiftAmount != 0) return false;
263
264 AArch64MCExpr::VariantKind Variant;
265 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
266 return Variant == AArch64MCExpr::VK_AARCH64_LO12
267 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
268 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
269 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
270 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
271 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
272 }
273
274 // Otherwise it should be a real immediate in range:
275 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
276 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
277 }
278
279 bool isAddSubImmLSL12() const {
280 if (!isImmWithLSL()) return false;
281 if (ImmWithLSL.ShiftAmount != 12) return false;
282
283 AArch64MCExpr::VariantKind Variant;
284 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
285 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
286 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
287 }
288
289 // Otherwise it should be a real immediate in range:
290 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
291 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
292 }
293
294 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
295 if (!isShiftOrExtend()) return false;
296
297 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
298 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
299 return false;
300
301 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
302 return false;
303
304 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
305 }
306
307 bool isAdrpLabel() const {
308 if (!isImm()) return false;
309
310 AArch64MCExpr::VariantKind Variant;
311 if (isNonConstantExpr(getImm(), Variant)) {
312 return Variant == AArch64MCExpr::VK_AARCH64_None
313 || Variant == AArch64MCExpr::VK_AARCH64_GOT
314 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
315 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
316 }
317
318 return isLabel<21, 4096>();
319 }
320
321 template<unsigned RegWidth> bool isBitfieldWidth() const {
322 if (!isImm()) return false;
323
324 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
325 if (!CE) return false;
326
327 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
328 }
329
330 template<int RegWidth>
331 bool isCVTFixedPos() const {
332 if (!isImm()) return false;
333
334 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
335 if (!CE) return false;
336
337 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
338 }
339
340 bool isFMOVImm() const {
341 if (!isFPImm()) return false;
342
343 APFloat RealVal(FPImm.Val);
344 uint32_t ImmVal;
345 return A64Imms::isFPImm(RealVal, ImmVal);
346 }
347
348 bool isFPZero() const {
349 if (!isFPImm()) return false;
350
351 APFloat RealVal(FPImm.Val);
352 return RealVal.isPosZero();
353 }
354
355 template<unsigned field_width, unsigned scale>
356 bool isLabel() const {
357 if (!isImm()) return false;
358
359 if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
360 return true;
361 } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
362 int64_t Val = CE->getValue();
363 int64_t Min = - (scale * (1LL << (field_width - 1)));
364 int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
365 return (Val % scale) == 0 && Val >= Min && Val <= Max;
366 }
367
368 // N.b. this disallows explicit relocation specifications via an
369 // AArch64MCExpr. Users needing that behaviour
370 return false;
371 }
372
373 bool isLane1() const {
374 if (!isImm()) return false;
375
376 // Because it's come through custom assembly parsing, it must always be a
377 // constant expression.
378 return cast<MCConstantExpr>(getImm())->getValue() == 1;
379 }
380
381 bool isLoadLitLabel() const {
382 if (!isImm()) return false;
383
384 AArch64MCExpr::VariantKind Variant;
385 if (isNonConstantExpr(getImm(), Variant)) {
386 return Variant == AArch64MCExpr::VK_AARCH64_None
387 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
388 }
389
390 return isLabel<19, 4>();
391 }
392
393 template<unsigned RegWidth> bool isLogicalImm() const {
394 if (!isImm()) return false;
395
396 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
397 if (!CE) return false;
398
399 uint32_t Bits;
400 return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
401 }
402
403 template<unsigned RegWidth> bool isLogicalImmMOV() const {
404 if (!isLogicalImm<RegWidth>()) return false;
405
406 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
407
408 // The move alias for ORR is only valid if the immediate cannot be
409 // represented with a move (immediate) instruction; they take priority.
410 int UImm16, Shift;
411 return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
412 && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
413 }
414
415 template<int MemSize>
416 bool isOffsetUImm12() const {
417 if (!isImm()) return false;
418
419 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
420
421 // Assume they know what they're doing for now if they've given us a
422 // non-constant expression. In principle we could check for ridiculous
423 // things that can't possibly work or relocations that would almost
424 // certainly break resulting code.
425 if (!CE)
426 return true;
427
428 int64_t Val = CE->getValue();
429
430 // Must be a multiple of the access size in bytes.
431 if ((Val & (MemSize - 1)) != 0) return false;
432
433 // Must be 12-bit unsigned
434 return Val >= 0 && Val <= 0xfff * MemSize;
435 }
436
437 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
438 bool isShift() const {
439 if (!isShiftOrExtend()) return false;
440
441 if (ShiftExtend.ShiftType != SHKind)
442 return false;
443
444 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
445 }
446
447 bool isMOVN32Imm() const {
448 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
449 AArch64MCExpr::VK_AARCH64_SABS_G0,
450 AArch64MCExpr::VK_AARCH64_SABS_G1,
451 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
452 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
453 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
454 AArch64MCExpr::VK_AARCH64_TPREL_G1,
455 AArch64MCExpr::VK_AARCH64_TPREL_G0,
456 };
457 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
458
459 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
460 }
461
462 bool isMOVN64Imm() const {
463 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
464 AArch64MCExpr::VK_AARCH64_SABS_G0,
465 AArch64MCExpr::VK_AARCH64_SABS_G1,
466 AArch64MCExpr::VK_AARCH64_SABS_G2,
467 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
468 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
469 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
470 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
471 AArch64MCExpr::VK_AARCH64_TPREL_G2,
472 AArch64MCExpr::VK_AARCH64_TPREL_G1,
473 AArch64MCExpr::VK_AARCH64_TPREL_G0,
474 };
475 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
476
477 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
478 }
479
480
481 bool isMOVZ32Imm() const {
482 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
483 AArch64MCExpr::VK_AARCH64_ABS_G0,
484 AArch64MCExpr::VK_AARCH64_ABS_G1,
485 AArch64MCExpr::VK_AARCH64_SABS_G0,
486 AArch64MCExpr::VK_AARCH64_SABS_G1,
487 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
488 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
489 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
490 AArch64MCExpr::VK_AARCH64_TPREL_G1,
491 AArch64MCExpr::VK_AARCH64_TPREL_G0,
492 };
493 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
494
495 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
496 }
497
498 bool isMOVZ64Imm() const {
499 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
500 AArch64MCExpr::VK_AARCH64_ABS_G0,
501 AArch64MCExpr::VK_AARCH64_ABS_G1,
502 AArch64MCExpr::VK_AARCH64_ABS_G2,
503 AArch64MCExpr::VK_AARCH64_ABS_G3,
504 AArch64MCExpr::VK_AARCH64_SABS_G0,
505 AArch64MCExpr::VK_AARCH64_SABS_G1,
506 AArch64MCExpr::VK_AARCH64_SABS_G2,
507 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
508 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
509 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
510 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
511 AArch64MCExpr::VK_AARCH64_TPREL_G2,
512 AArch64MCExpr::VK_AARCH64_TPREL_G1,
513 AArch64MCExpr::VK_AARCH64_TPREL_G0,
514 };
515 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
516
517 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
518 }
519
520 bool isMOVK32Imm() const {
521 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
522 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
523 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
524 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
525 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
526 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
527 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
528 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
529 };
530 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
531
532 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
533 }
534
535 bool isMOVK64Imm() const {
536 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
537 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
538 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
539 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
540 AArch64MCExpr::VK_AARCH64_ABS_G3,
541 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
542 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
543 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
544 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
545 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
546 };
547 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
548
549 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
550 }
551
552 bool isMoveWideImm(unsigned RegWidth,
553 AArch64MCExpr::VariantKind *PermittedModifiers,
554 unsigned NumModifiers) const {
555 if (!isImmWithLSL()) return false;
556
557 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
558 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
559
560 AArch64MCExpr::VariantKind Modifier;
561 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
562 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
563 if (!ImmWithLSL.ImplicitAmount) return false;
564
565 for (unsigned i = 0; i < NumModifiers; ++i)
566 if (PermittedModifiers[i] == Modifier) return true;
567
568 return false;
569 }
570
571 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
572 return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
573 }
574
575 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
576 bool isMoveWideMovAlias() const {
577 if (!isImm()) return false;
578
579 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
580 if (!CE) return false;
581
582 int UImm16, Shift;
583 uint64_t Value = CE->getValue();
584
585 // If this is a 32-bit instruction then all bits above 32 should be the
586 // same: either of these is fine because signed/unsigned values should be
587 // permitted.
588 if (RegWidth == 32) {
589 if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
590 return false;
591
592 Value &= 0xffffffffULL;
593 }
594
595 return isValidImm(RegWidth, Value, UImm16, Shift);
596 }
597
598 bool isMSRWithReg() const {
599 if (!isSysReg()) return false;
600
601 bool IsKnownRegister;
602 StringRef Name(SysReg.Data, SysReg.Length);
603 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
604
605 return IsKnownRegister;
606 }
607
608 bool isMSRPState() const {
609 if (!isSysReg()) return false;
610
611 bool IsKnownRegister;
612 StringRef Name(SysReg.Data, SysReg.Length);
613 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
614
615 return IsKnownRegister;
616 }
617
618 bool isMRS() const {
619 if (!isSysReg()) return false;
620
621 // First check against specific MSR-only (write-only) registers
622 bool IsKnownRegister;
623 StringRef Name(SysReg.Data, SysReg.Length);
624 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
625
626 return IsKnownRegister;
627 }
628
629 bool isPRFM() const {
630 if (!isImm()) return false;
631
632 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
633
634 if (!CE)
635 return false;
636
637 return CE->getValue() >= 0 && CE->getValue() <= 31;
638 }
639
640 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
641 if (!isShiftOrExtend()) return false;
642
643 if (ShiftExtend.ShiftType != SHKind)
644 return false;
645
646 return ShiftExtend.Amount <= 4;
647 }
648
649 bool isRegExtendLSL() const {
650 if (!isShiftOrExtend()) return false;
651
652 if (ShiftExtend.ShiftType != A64SE::LSL)
653 return false;
654
655 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
656 }
657
658 template<int MemSize> bool isSImm7Scaled() const {
659 if (!isImm()) return false;
660
661 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662 if (!CE) return false;
663
664 int64_t Val = CE->getValue();
665 if (Val % MemSize != 0) return false;
666
667 Val /= MemSize;
668
669 return Val >= -64 && Val < 64;
670 }
671
672 template<int BitWidth>
673 bool isSImm() const {
674 if (!isImm()) return false;
675
676 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
677 if (!CE) return false;
678
679 return CE->getValue() >= -(1LL << (BitWidth - 1))
680 && CE->getValue() < (1LL << (BitWidth - 1));
681 }
682
683 template<int bitWidth>
684 bool isUImm() const {
685 if (!isImm()) return false;
686
687 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688 if (!CE) return false;
689
690 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
691 }
692
693 bool isUImm() const {
694 if (!isImm()) return false;
695
696 return isa<MCConstantExpr>(getImm());
697 }
698
699 static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
700 unsigned ShiftAmount,
701 bool ImplicitAmount,
702 SMLoc S, SMLoc E) {
703 AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
704 Op->ImmWithLSL.Val = Val;
705 Op->ImmWithLSL.ShiftAmount = ShiftAmount;
706 Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
707 return Op;
708 }
709
710 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
711 SMLoc S, SMLoc E) {
712 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
713 Op->CondCode.Code = Code;
714 return Op;
715 }
716
717 static AArch64Operand *CreateFPImm(double Val,
718 SMLoc S, SMLoc E) {
719 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
720 Op->FPImm.Val = Val;
721 return Op;
722 }
723
724 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
725 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
726 Op->Imm.Val = Val;
727 return Op;
728 }
729
730 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
731 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
732 Op->Reg.RegNum = RegNum;
733 return Op;
734 }
735
736 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
737 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
738 Op->Reg.RegNum = RegNum;
739 return Op;
740 }
741
742 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
743 unsigned Amount,
744 bool ImplicitAmount,
745 SMLoc S, SMLoc E) {
746 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
747 Op->ShiftExtend.ShiftType = ShiftTyp;
748 Op->ShiftExtend.Amount = Amount;
749 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
750 return Op;
751 }
752
753 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
754 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
755 Op->Tok.Data = Str.data();
756 Op->Tok.Length = Str.size();
757 return Op;
758 }
759
760 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
761 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
762 Op->Tok.Data = Str.data();
763 Op->Tok.Length = Str.size();
764 return Op;
765 }
766
767
768 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
769 // Add as immediates when possible.
770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
771 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
772 else
773 Inst.addOperand(MCOperand::CreateExpr(Expr));
774 }
775
776 template<unsigned RegWidth>
777 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
778 assert(N == 1 && "Invalid number of operands!");
779 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
780 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
781 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
782 }
783
784 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
785 assert(N == 1 && "Invalid number of operands!");
786 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
787 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
788 }
789
790 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
791 assert(N == 1 && "Invalid number of operands!");
792
793 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
794 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
795
796 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
797 }
798
799 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
800 assert(N == 1 && "Invalid number of operands!");
801 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
802 }
803
804 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
805 assert(N == 1 && "Invalid number of operands!");
806
807 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
808 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
809 }
810
811 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
812 assert(N == 1 && "Invalid number of operands!");
813
814 APFloat RealVal(FPImm.Val);
815 uint32_t ImmVal;
816 A64Imms::isFPImm(RealVal, ImmVal);
817
818 Inst.addOperand(MCOperand::CreateImm(ImmVal));
819 }
820
821 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
822 assert(N == 1 && "Invalid number of operands");
823 Inst.addOperand(MCOperand::CreateImm(0));
824 }
825
826 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
827 assert(N == 1 && "Invalid number of operands!");
828 unsigned Encoded = A64InvertCondCode(getCondCode());
829 Inst.addOperand(MCOperand::CreateImm(Encoded));
830 }
831
832 void addRegOperands(MCInst &Inst, unsigned N) const {
833 assert(N == 1 && "Invalid number of operands!");
834 Inst.addOperand(MCOperand::CreateReg(getReg()));
835 }
836
837 void addImmOperands(MCInst &Inst, unsigned N) const {
838 assert(N == 1 && "Invalid number of operands!");
839 addExpr(Inst, getImm());
840 }
841
842 template<int MemSize>
843 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
844 assert(N == 1 && "Invalid number of operands!");
845
846 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
847 uint64_t Val = CE->getValue() / MemSize;
848 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
849 }
850
851 template<int BitWidth>
852 void addSImmOperands(MCInst &Inst, unsigned N) const {
853 assert(N == 1 && "Invalid number of operands!");
854
855 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
856 uint64_t Val = CE->getValue();
857 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
858 }
859
860 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
861 assert (N == 1 && "Invalid number of operands!");
862
863 addExpr(Inst, ImmWithLSL.Val);
864 }
865
866 template<unsigned field_width, unsigned scale>
867 void addLabelOperands(MCInst &Inst, unsigned N) const {
868 assert(N == 1 && "Invalid number of operands!");
869
870 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
871
872 if (!CE) {
873 addExpr(Inst, Imm.Val);
874 return;
875 }
876
877 int64_t Val = CE->getValue();
878 assert(Val % scale == 0 && "Unaligned immediate in instruction");
879 Val /= scale;
880
881 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
882 }
883
884 template<int MemSize>
885 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
886 assert(N == 1 && "Invalid number of operands!");
887
888 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
889 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
890 } else {
891 Inst.addOperand(MCOperand::CreateExpr(getImm()));
892 }
893 }
894
895 template<unsigned RegWidth>
896 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
897 assert(N == 1 && "Invalid number of operands");
898 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
899
900 uint32_t Bits;
901 A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
902
903 Inst.addOperand(MCOperand::CreateImm(Bits));
904 }
905
906 void addMRSOperands(MCInst &Inst, unsigned N) const {
907 assert(N == 1 && "Invalid number of operands!");
908
909 bool Valid;
910 StringRef Name(SysReg.Data, SysReg.Length);
911 uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
912
913 Inst.addOperand(MCOperand::CreateImm(Bits));
914 }
915
916 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
917 assert(N == 1 && "Invalid number of operands!");
918
919 bool Valid;
920 StringRef Name(SysReg.Data, SysReg.Length);
921 uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
922
923 Inst.addOperand(MCOperand::CreateImm(Bits));
924 }
925
926 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
927 assert(N == 1 && "Invalid number of operands!");
928
929 bool Valid;
930 StringRef Name(SysReg.Data, SysReg.Length);
931 uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
932
933 Inst.addOperand(MCOperand::CreateImm(Bits));
934 }
935
936 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
937 assert(N == 2 && "Invalid number of operands!");
938
939 addExpr(Inst, ImmWithLSL.Val);
940
941 AArch64MCExpr::VariantKind Variant;
942 if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
943 Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
944 return;
945 }
946
947 // We know it's relocated
948 switch (Variant) {
949 case AArch64MCExpr::VK_AARCH64_ABS_G0:
950 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
951 case AArch64MCExpr::VK_AARCH64_SABS_G0:
952 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
953 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
954 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
955 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
956 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
957 Inst.addOperand(MCOperand::CreateImm(0));
958 break;
959 case AArch64MCExpr::VK_AARCH64_ABS_G1:
960 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
961 case AArch64MCExpr::VK_AARCH64_SABS_G1:
962 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
963 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
964 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
965 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
966 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
967 Inst.addOperand(MCOperand::CreateImm(1));
968 break;
969 case AArch64MCExpr::VK_AARCH64_ABS_G2:
970 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
971 case AArch64MCExpr::VK_AARCH64_SABS_G2:
972 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
973 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
974 Inst.addOperand(MCOperand::CreateImm(2));
975 break;
976 case AArch64MCExpr::VK_AARCH64_ABS_G3:
977 Inst.addOperand(MCOperand::CreateImm(3));
978 break;
979 default: llvm_unreachable("Inappropriate move wide relocation");
980 }
981 }
982
983 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
984 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
985 assert(N == 2 && "Invalid number of operands!");
986 int UImm16, Shift;
987
988 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
989 uint64_t Value = CE->getValue();
990
991 if (RegWidth == 32) {
992 Value &= 0xffffffffULL;
993 }
994
995 bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
996 (void)Valid;
997 assert(Valid && "Invalid immediates should have been weeded out by now");
998
999 Inst.addOperand(MCOperand::CreateImm(UImm16));
1000 Inst.addOperand(MCOperand::CreateImm(Shift));
1001 }
1002
1003 void addPRFMOperands(MCInst &Inst, unsigned N) const {
1004 assert(N == 1 && "Invalid number of operands!");
1005
1006 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1007 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1008 && "PRFM operand should be 5-bits");
1009
1010 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1011 }
1012
1013 // For Add-sub (extended register) operands.
1014 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1015 assert(N == 1 && "Invalid number of operands!");
1016
1017 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1018 }
1019
  // For the extend in load-store (register offset) instructions.
  // Thin compile-time wrapper: forwards the template MemSize (access size in
  // bytes) to the runtime-parameterised overload below.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }
1025
1026 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1027 unsigned MemSize) const {
1028 assert(N == 1 && "Invalid number of operands!");
1029
1030 // First bit of Option is set in instruction classes, the high two bits are
1031 // as follows:
1032 unsigned OptionHi = 0;
1033 switch (ShiftExtend.ShiftType) {
1034 case A64SE::UXTW:
1035 case A64SE::LSL:
1036 OptionHi = 1;
1037 break;
1038 case A64SE::SXTW:
1039 case A64SE::SXTX:
1040 OptionHi = 3;
1041 break;
1042 default:
1043 llvm_unreachable("Invalid extend type for register offset");
1044 }
1045
1046 unsigned S = 0;
1047 if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
1048 S = 1;
1049 else if (MemSize != 1 && ShiftExtend.Amount != 0)
1050 S = 1;
1051
1052 Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
1053 }
1054 void addShiftOperands(MCInst &Inst, unsigned N) const {
1055 assert(N == 1 && "Invalid number of operands!");
1056
1057 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1058 }
1059};
1060
1061} // end anonymous namespace.
1062
// Parse a single instruction operand, appending the AArch64Operand(s) it
// produces to Operands. Dispatches on the first token: custom (tablegen'd)
// parsers first, then shifts/extends, registers, expressions, '#'-immediates
// and '['-introduced addresses.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  // No custom parser claimed it; decide from the current token kind.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      // A '[' directly after a vector register introduces a lane index.
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
  // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    // All of these begin an expression operand written without a leading '#'.
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    // End location is the character just before the token that follows the
    // expression.
    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly:  // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}
1147
1148AArch64AsmParser::OperandMatchResultTy
1149AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1150 if (getLexer().is(AsmToken::Colon)) {
1151 AArch64MCExpr::VariantKind RefKind;
1152
1153 OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
1154 if (ResTy != MatchOperand_Success)
1155 return ResTy;
1156
1157 const MCExpr *SubExprVal;
Jim Grosbachd2037eb2013-02-20 22:21:35 +00001158 if (getParser().parseExpression(SubExprVal))
Tim Northovere0e3aef2013-01-31 12:12:40 +00001159 return MatchOperand_ParseFail;
1160
1161 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1162 return MatchOperand_Success;
1163 }
1164
1165 // No weird AArch64MCExpr prefix
Jim Grosbachd2037eb2013-02-20 22:21:35 +00001166 return getParser().parseExpression(ExprVal)
Tim Northovere0e3aef2013-01-31 12:12:40 +00001167 ? MatchOperand_ParseFail : MatchOperand_Success;
1168}
1169
1170// A lane attached to a NEON register. "[N]", which should yield three tokens:
1171// '[', N, ']'. A hash is not allowed to precede the immediate here.
1172AArch64AsmParser::OperandMatchResultTy
1173AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1174 uint32_t NumLanes) {
1175 SMLoc Loc = Parser.getTok().getLoc();
1176
1177 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1178 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1179 Parser.Lex(); // Eat '['
1180
1181 if (Parser.getTok().isNot(AsmToken::Integer)) {
1182 Error(Parser.getTok().getLoc(), "expected lane number");
1183 return MatchOperand_ParseFail;
1184 }
1185
1186 if (Parser.getTok().getIntVal() >= NumLanes) {
1187 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1188 return MatchOperand_ParseFail;
1189 }
1190
1191 const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
1192 getContext());
1193 SMLoc S = Parser.getTok().getLoc();
1194 Parser.Lex(); // Eat actual lane
1195 SMLoc E = Parser.getTok().getLoc();
1196 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
1197
1198
1199 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1200 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1201 return MatchOperand_ParseFail;
1202 }
1203
1204 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1205 Parser.Lex(); // Eat ']'
1206
1207 return MatchOperand_Success;
1208}
1209
// Parse a relocation-specifier prefix of the form ":name:" (cursor must be on
// the leading ':'). On success RefKind holds the variant and both colons plus
// the identifier have been consumed.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  // Specifiers are matched case-insensitively.
  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  // The specifier must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}
1275
1276AArch64AsmParser::OperandMatchResultTy
1277AArch64AsmParser::ParseImmWithLSLOperand(
1278 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1279 // FIXME?: I want to live in a world where immediates must start with
1280 // #. Please don't dash my hopes (well, do if you have a good reason).
1281 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1282
1283 SMLoc S = Parser.getTok().getLoc();
1284 Parser.Lex(); // Eat '#'
1285
1286 const MCExpr *Imm;
1287 if (ParseImmediate(Imm) != MatchOperand_Success)
1288 return MatchOperand_ParseFail;
1289 else if (Parser.getTok().isNot(AsmToken::Comma)) {
1290 SMLoc E = Parser.getTok().getLoc();
1291 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1292 return MatchOperand_Success;
1293 }
1294
1295 // Eat ','
1296 Parser.Lex();
1297
1298 // The optional operand must be "lsl #N" where N is non-negative.
1299 if (Parser.getTok().is(AsmToken::Identifier)
1300 && Parser.getTok().getIdentifier().lower() == "lsl") {
1301 Parser.Lex();
1302
1303 if (Parser.getTok().is(AsmToken::Hash)) {
1304 Parser.Lex();
1305
1306 if (Parser.getTok().isNot(AsmToken::Integer)) {
1307 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1308 return MatchOperand_ParseFail;
1309 }
1310 }
1311 }
1312
1313 int64_t ShiftAmount = Parser.getTok().getIntVal();
1314
1315 if (ShiftAmount < 0) {
1316 Error(Parser.getTok().getLoc(), "positive shift amount required");
1317 return MatchOperand_ParseFail;
1318 }
1319 Parser.Lex(); // Eat the number
1320
1321 SMLoc E = Parser.getTok().getLoc();
1322 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1323 false, S, E));
1324 return MatchOperand_Success;
1325}
1326
1327
1328AArch64AsmParser::OperandMatchResultTy
1329AArch64AsmParser::ParseCondCodeOperand(
1330 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1331 if (Parser.getTok().isNot(AsmToken::Identifier))
1332 return MatchOperand_NoMatch;
1333
1334 StringRef Tok = Parser.getTok().getIdentifier();
1335 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1336
1337 if (CondCode == A64CC::Invalid)
1338 return MatchOperand_NoMatch;
1339
1340 SMLoc S = Parser.getTok().getLoc();
1341 Parser.Lex(); // Eat condition code
1342 SMLoc E = Parser.getTok().getLoc();
1343
1344 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1345 return MatchOperand_Success;
1346}
1347
1348AArch64AsmParser::OperandMatchResultTy
1349AArch64AsmParser::ParseCRxOperand(
1350 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1351 SMLoc S = Parser.getTok().getLoc();
1352 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1353 Error(S, "Expected cN operand where 0 <= N <= 15");
1354 return MatchOperand_ParseFail;
1355 }
1356
Tim Northover24937c12013-02-04 15:44:38 +00001357 std::string LowerTok = Parser.getTok().getIdentifier().lower();
1358 StringRef Tok(LowerTok);
Tim Northovere0e3aef2013-01-31 12:12:40 +00001359 if (Tok[0] != 'c') {
1360 Error(S, "Expected cN operand where 0 <= N <= 15");
1361 return MatchOperand_ParseFail;
1362 }
1363
1364 uint32_t CRNum;
1365 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1366 if (BadNum || CRNum > 15) {
1367 Error(S, "Expected cN operand where 0 <= N <= 15");
1368 return MatchOperand_ParseFail;
1369 }
1370
1371 const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1372
1373 Parser.Lex();
1374 SMLoc E = Parser.getTok().getLoc();
1375
1376 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1377 return MatchOperand_Success;
1378}
1379
1380AArch64AsmParser::OperandMatchResultTy
1381AArch64AsmParser::ParseFPImmOperand(
1382 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1383
1384 // FIXME?: I want to live in a world where immediates must start with
1385 // #. Please don't dash my hopes (well, do if you have a good reason).
1386 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1387
1388 SMLoc S = Parser.getTok().getLoc();
1389 Parser.Lex(); // Eat '#'
1390
1391 bool Negative = false;
1392 if (Parser.getTok().is(AsmToken::Minus)) {
1393 Negative = true;
1394 Parser.Lex(); // Eat '-'
1395 } else if (Parser.getTok().is(AsmToken::Plus)) {
1396 Parser.Lex(); // Eat '+'
1397 }
1398
1399 if (Parser.getTok().isNot(AsmToken::Real)) {
1400 Error(S, "Expected floating-point immediate");
1401 return MatchOperand_ParseFail;
1402 }
1403
1404 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1405 if (Negative) RealVal.changeSign();
1406 double DblVal = RealVal.convertToDouble();
1407
1408 Parser.Lex(); // Eat real number
1409 SMLoc E = Parser.getTok().getLoc();
1410
1411 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1412 return MatchOperand_Success;
1413}
1414
1415
1416// Automatically generated
1417static unsigned MatchRegisterName(StringRef Name);
1418
// Recognise the current token as a register name with an optional ".layout"
// suffix (e.g. "v0.8b"). On success fills in RegNum, RegEndLoc and — when a
// suffix is present — Layout/LayoutLoc. Does NOT consume the token; callers
// Lex() afterwards. Returns false if the token is not a valid register.
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  // Match case-insensitively; DotPos == npos when there is no layout suffix
  // (substr(0, npos) then takes the whole name).
  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    // Not a primary register name; try the architectural aliases.
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d','.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    // Note: LayoutText keeps its leading '.', matching the cases below.
    std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}
1472
// Parse a register operand (possibly with a NEON layout suffix). On success
// pushes the register operand (and, if present, the layout token) and sets
// NumLanes to the lane count implied by the suffix.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    // Layouts like ".8b" carry an explicit lane count; extract it (skipping
    // the leading '.'). getAsUnsignedInteger leaves TmpLanes at 0 for the
    // countless forms like ".b".
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}
1510
1511bool
1512AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1513 SMLoc &EndLoc) {
1514 // This callback is used for things like DWARF frame directives in
1515 // assembly. They don't care about things like NEON layouts or lanes, they
1516 // just want to be able to produce the DWARF register number.
1517 StringRef LayoutSpec;
1518 SMLoc RegEndLoc, LayoutLoc;
1519 StartLoc = Parser.getTok().getLoc();
1520
1521 if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1522 return true;
1523
1524 Parser.Lex();
1525 EndLoc = Parser.getTok().getLoc();
1526
1527 return false;
1528}
1529
// Parse an operand that is either a symbolic name known to Mapper (e.g. a
// barrier or PState name) or a '#'-prefixed raw immediate validated by
// Mapper. Emits a constant-immediate operand either way.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If relaxing
  // this, beware of unintended consequences: an immediate will be accepted
  // during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    // Symbolic form: look the (lower-cased) name up in the mapper.
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    // Raw immediate form: must be a non-negative constant the mapper accepts.
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}
1576
1577AArch64AsmParser::OperandMatchResultTy
1578AArch64AsmParser::ParseSysRegOperand(
1579 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1580 const AsmToken &Tok = Parser.getTok();
1581
1582 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1583 // kind of string: SPSel is valid for two different forms of MSR with two
1584 // different encodings. There's no collision at the moment, but the potential
1585 // is there.
1586 if (!Tok.is(AsmToken::Identifier)) {
1587 return MatchOperand_NoMatch;
1588 }
1589
1590 SMLoc S = Tok.getLoc();
1591 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1592 Parser.Lex(); // Eat identifier
1593
1594 return MatchOperand_Success;
1595}
1596
1597AArch64AsmParser::OperandMatchResultTy
1598AArch64AsmParser::ParseLSXAddressOperand(
1599 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1600 SMLoc S = Parser.getTok().getLoc();
1601
1602 unsigned RegNum;
1603 SMLoc RegEndLoc, LayoutLoc;
1604 StringRef Layout;
1605 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1606 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1607 || Layout.size() != 0) {
1608 // Check Layout.size because we don't want to let "x3.4s" or similar
1609 // through.
1610 return MatchOperand_NoMatch;
1611 }
1612 Parser.Lex(); // Eat register
1613
1614 if (Parser.getTok().is(AsmToken::RBrac)) {
1615 // We're done
1616 SMLoc E = Parser.getTok().getLoc();
1617 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1618 return MatchOperand_Success;
1619 }
1620
1621 // Otherwise, only ", #0" is valid
1622
1623 if (Parser.getTok().isNot(AsmToken::Comma)) {
1624 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1625 return MatchOperand_ParseFail;
1626 }
1627 Parser.Lex(); // Eat ','
1628
1629 if (Parser.getTok().isNot(AsmToken::Hash)) {
1630 Error(Parser.getTok().getLoc(), "expected '#0'");
1631 return MatchOperand_ParseFail;
1632 }
1633 Parser.Lex(); // Eat '#'
1634
1635 if (Parser.getTok().isNot(AsmToken::Integer)
1636 || Parser.getTok().getIntVal() != 0 ) {
1637 Error(Parser.getTok().getLoc(), "expected '#0'");
1638 return MatchOperand_ParseFail;
1639 }
1640 Parser.Lex(); // Eat '0'
1641
1642 SMLoc E = Parser.getTok().getLoc();
1643 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1644 return MatchOperand_Success;
1645}
1646
1647AArch64AsmParser::OperandMatchResultTy
1648AArch64AsmParser::ParseShiftExtend(
1649 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1650 StringRef IDVal = Parser.getTok().getIdentifier();
1651 std::string LowerID = IDVal.lower();
1652
1653 A64SE::ShiftExtSpecifiers Spec =
1654 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1655 .Case("lsl", A64SE::LSL)
1656 .Case("lsr", A64SE::LSR)
1657 .Case("asr", A64SE::ASR)
1658 .Case("ror", A64SE::ROR)
1659 .Case("uxtb", A64SE::UXTB)
1660 .Case("uxth", A64SE::UXTH)
1661 .Case("uxtw", A64SE::UXTW)
1662 .Case("uxtx", A64SE::UXTX)
1663 .Case("sxtb", A64SE::SXTB)
1664 .Case("sxth", A64SE::SXTH)
1665 .Case("sxtw", A64SE::SXTW)
1666 .Case("sxtx", A64SE::SXTX)
1667 .Default(A64SE::Invalid);
1668
1669 if (Spec == A64SE::Invalid)
1670 return MatchOperand_NoMatch;
1671
1672 // Eat the shift
1673 SMLoc S, E;
1674 S = Parser.getTok().getLoc();
1675 Parser.Lex();
1676
1677 if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
1678 Spec != A64SE::ASR && Spec != A64SE::ROR) {
1679 // The shift amount can be omitted for the extending versions, but not real
1680 // shifts:
1681 // add x0, x0, x0, uxtb
1682 // is valid, and equivalent to
1683 // add x0, x0, x0, uxtb #0
1684
1685 if (Parser.getTok().is(AsmToken::Comma) ||
1686 Parser.getTok().is(AsmToken::EndOfStatement) ||
1687 Parser.getTok().is(AsmToken::RBrac)) {
Tim Northoverbcaca872013-02-05 13:24:56 +00001688 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1689 S, E));
Tim Northovere0e3aef2013-01-31 12:12:40 +00001690 return MatchOperand_Success;
1691 }
1692 }
1693
1694 // Eat # at beginning of immediate
1695 if (!Parser.getTok().is(AsmToken::Hash)) {
1696 Error(Parser.getTok().getLoc(),
1697 "expected #imm after shift specifier");
1698 return MatchOperand_ParseFail;
1699 }
1700 Parser.Lex();
1701
1702 // Make sure we do actually have a number
1703 if (!Parser.getTok().is(AsmToken::Integer)) {
1704 Error(Parser.getTok().getLoc(),
1705 "expected integer shift amount");
1706 return MatchOperand_ParseFail;
1707 }
1708 unsigned Amount = Parser.getTok().getIntVal();
1709 Parser.Lex();
1710 E = Parser.getTok().getLoc();
1711
Tim Northoverbcaca872013-02-05 13:24:56 +00001712 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
1713 S, E));
Tim Northovere0e3aef2013-01-31 12:12:40 +00001714
1715 return MatchOperand_Success;
1716}
1717
// FIXME: We would really like to be able to tablegen'erate this.
//
// Post-match semantic checks that the tablegen'd matcher cannot express;
// returns true (and reports an Error at the offending operand) on failure.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    // Bitfield-insert aliases: by this point the lsb/width operands have
    // been converted to the underlying BFM-style ImmR/ImmS fields (the last
    // two operands). NOTE(review): the check below rejects ImmS >= ImmR
    // except when ImmR == 0 (lsb == 0) — confirm against the alias encoding
    // rules in the ARMv8 ARM if touching this.
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();

    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    // Bitfield-extract aliases: lsb (ImmR) plus width must stay inside the
    // register, i.e. msb (ImmS) must satisfy ImmR <= ImmS < RegWidth.
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    // Register form of IC: the named op must be one that takes a register.
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    // Register-less form of IC: the named op must not require a register.
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    // Register form of TLBI: same register-requirement pairing as IC above.
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}
1804
1805
// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list. A '.' in the mnemonic introduces a
// condition-code suffix (e.g. "b.eq"). Returns true on error (after eating
// the rest of the statement).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    // The mnemonic is matched as three tokens: "b" "." "eq". Condition
    // codes are two characters, hence the +3 for the end location.
    Operands.push_back(AArch64Operand::CreateToken(".",  DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }


      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}
1888
1889bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
1890 StringRef IDVal = DirectiveID.getIdentifier();
1891 if (IDVal == ".hword")
1892 return ParseDirectiveWord(2, DirectiveID.getLoc());
1893 else if (IDVal == ".word")
1894 return ParseDirectiveWord(4, DirectiveID.getLoc());
1895 else if (IDVal == ".xword")
1896 return ParseDirectiveWord(8, DirectiveID.getLoc());
1897 else if (IDVal == ".tlsdesccall")
1898 return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
1899
1900 return true;
1901}
1902
1903/// parseDirectiveWord
1904/// ::= .word [ expression (, expression)* ]
1905bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
1906 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1907 for (;;) {
1908 const MCExpr *Value;
Jim Grosbachd2037eb2013-02-20 22:21:35 +00001909 if (getParser().parseExpression(Value))
Tim Northovere0e3aef2013-01-31 12:12:40 +00001910 return true;
1911
1912 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
1913
1914 if (getLexer().is(AsmToken::EndOfStatement))
1915 break;
1916
1917 // FIXME: Improve diagnostic.
1918 if (getLexer().isNot(AsmToken::Comma))
1919 return Error(L, "unexpected token in directive");
1920 Parser.Lex();
1921 }
1922 }
1923
1924 Parser.Lex();
1925 return false;
1926}
1927
1928// parseDirectiveTLSDescCall:
1929// ::= .tlsdesccall symbol
1930bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
1931 StringRef Name;
Jim Grosbachd2037eb2013-02-20 22:21:35 +00001932 if (getParser().parseIdentifier(Name))
Tim Northovere0e3aef2013-01-31 12:12:40 +00001933 return Error(L, "expected symbol after directive");
1934
1935 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
1936 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
1937
1938 MCInst Inst;
1939 Inst.setOpcode(AArch64::TLSDESCCALL);
1940 Inst.addOperand(MCOperand::CreateExpr(Expr));
1941
1942 getParser().getStreamer().EmitInstruction(Inst);
1943 return false;
1944}
1945
1946
1947bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1948 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1949 MCStreamer &Out, unsigned &ErrorInfo,
1950 bool MatchingInlineAsm) {
1951 MCInst Inst;
1952 unsigned MatchResult;
Tim Northoverbcaca872013-02-05 13:24:56 +00001953 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
Tim Northovere0e3aef2013-01-31 12:12:40 +00001954 MatchingInlineAsm);
Tim Northover60baeb92013-02-11 09:29:37 +00001955
1956 if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
1957 return Error(IDLoc, "too few operands for instruction");
1958
Tim Northovere0e3aef2013-01-31 12:12:40 +00001959 switch (MatchResult) {
1960 default: break;
1961 case Match_Success:
1962 if (validateInstruction(Inst, Operands))
1963 return true;
1964
1965 Out.EmitInstruction(Inst);
1966 return false;
1967 case Match_MissingFeature:
1968 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
1969 return true;
1970 case Match_InvalidOperand: {
1971 SMLoc ErrorLoc = IDLoc;
1972 if (ErrorInfo != ~0U) {
Tim Northovere0e3aef2013-01-31 12:12:40 +00001973 ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
1974 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
1975 }
1976
1977 return Error(ErrorLoc, "invalid operand for instruction");
1978 }
1979 case Match_MnemonicFail:
1980 return Error(IDLoc, "invalid instruction");
Tim Northover60baeb92013-02-11 09:29:37 +00001981
1982 case Match_AddSubRegExtendSmall:
1983 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1984 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
1985 case Match_AddSubRegExtendLarge:
1986 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1987 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
1988 case Match_AddSubRegShift32:
1989 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1990 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
1991 case Match_AddSubRegShift64:
1992 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1993 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
1994 case Match_AddSubSecondSource:
1995 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1996 "expected compatible register, symbol or integer in range [0, 4095]");
1997 case Match_CVTFixedPos32:
1998 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1999 "expected integer in range [1, 32]");
2000 case Match_CVTFixedPos64:
2001 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2002 "expected integer in range [1, 64]");
2003 case Match_CondCode:
2004 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2005 "expected AArch64 condition code");
2006 case Match_FPImm:
2007 // Any situation which allows a nontrivial floating-point constant also
2008 // allows a register.
2009 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2010 "expected compatible register or floating-point constant");
2011 case Match_FPZero:
2012 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2013 "expected floating-point constant #0.0");
2014 case Match_Label:
2015 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2016 "expected label or encodable integer pc offset");
2017 case Match_Lane1:
2018 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2019 "expected lane specifier '[1]'");
2020 case Match_LoadStoreExtend32_1:
2021 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2022 "expected 'uxtw' or 'sxtw' with optional shift of #0");
2023 case Match_LoadStoreExtend32_2:
2024 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2025 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
2026 case Match_LoadStoreExtend32_4:
2027 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2028 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
2029 case Match_LoadStoreExtend32_8:
2030 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2031 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
2032 case Match_LoadStoreExtend32_16:
2033 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2034 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
2035 case Match_LoadStoreExtend64_1:
2036 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2037 "expected 'lsl' or 'sxtx' with optional shift of #0");
2038 case Match_LoadStoreExtend64_2:
2039 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2040 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
2041 case Match_LoadStoreExtend64_4:
2042 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2043 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
2044 case Match_LoadStoreExtend64_8:
2045 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2046 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
2047 case Match_LoadStoreExtend64_16:
2048 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2049 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
2050 case Match_LoadStoreSImm7_4:
2051 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2052 "expected integer multiple of 4 in range [-256, 252]");
2053 case Match_LoadStoreSImm7_8:
2054 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2055 "expected integer multiple of 8 in range [-512, 508]");
2056 case Match_LoadStoreSImm7_16:
2057 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2058 "expected integer multiple of 16 in range [-1024, 1016]");
2059 case Match_LoadStoreSImm9:
2060 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2061 "expected integer in range [-256, 255]");
2062 case Match_LoadStoreUImm12_1:
2063 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2064 "expected symbolic reference or integer in range [0, 4095]");
2065 case Match_LoadStoreUImm12_2:
2066 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2067 "expected symbolic reference or integer in range [0, 8190]");
2068 case Match_LoadStoreUImm12_4:
2069 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2070 "expected symbolic reference or integer in range [0, 16380]");
2071 case Match_LoadStoreUImm12_8:
2072 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2073 "expected symbolic reference or integer in range [0, 32760]");
2074 case Match_LoadStoreUImm12_16:
2075 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2076 "expected symbolic reference or integer in range [0, 65520]");
2077 case Match_LogicalSecondSource:
2078 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2079 "expected compatible register or logical immediate");
2080 case Match_MOVWUImm16:
2081 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2082 "expected relocated symbol or integer in range [0, 65535]");
2083 case Match_MRS:
2084 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2085 "expected readable system register");
2086 case Match_MSR:
2087 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2088 "expected writable system register or pstate");
2089 case Match_NamedImm_at:
2090 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2091 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
2092 case Match_NamedImm_dbarrier:
2093 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2094 "expected integer in range [0, 15] or symbolic barrier operand");
2095 case Match_NamedImm_dc:
2096 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2097 "expected symbolic 'dc' operand");
2098 case Match_NamedImm_ic:
2099 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2100 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
2101 case Match_NamedImm_isb:
2102 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2103 "expected integer in range [0, 15] or 'sy'");
2104 case Match_NamedImm_prefetch:
2105 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2106 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
2107 case Match_NamedImm_tlbi:
2108 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2109 "expected translation buffer invalidation operand");
2110 case Match_UImm16:
2111 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2112 "expected integer in range [0, 65535]");
2113 case Match_UImm3:
2114 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2115 "expected integer in range [0, 7]");
2116 case Match_UImm4:
2117 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2118 "expected integer in range [0, 15]");
2119 case Match_UImm5:
2120 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2121 "expected integer in range [0, 31]");
2122 case Match_UImm6:
2123 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2124 "expected integer in range [0, 63]");
2125 case Match_UImm7:
2126 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2127 "expected integer in range [0, 127]");
2128 case Match_Width32:
2129 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2130 "expected integer in range [<lsb>, 31]");
2131 case Match_Width64:
2132 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2133 "expected integer in range [<lsb>, 63]");
Tim Northovere0e3aef2013-01-31 12:12:40 +00002134 }
2135
2136 llvm_unreachable("Implement any new match types added!");
2137 return true;
2138}
2139
2140void AArch64Operand::print(raw_ostream &OS) const {
2141 switch (Kind) {
2142 case k_CondCode:
2143 OS << "<CondCode: " << CondCode.Code << ">";
2144 break;
2145 case k_FPImmediate:
2146 OS << "<fpimm: " << FPImm.Val << ">";
2147 break;
2148 case k_ImmWithLSL:
2149 OS << "<immwithlsl: imm=" << ImmWithLSL.Val
2150 << ", shift=" << ImmWithLSL.ShiftAmount << ">";
2151 break;
2152 case k_Immediate:
2153 getImm()->print(OS);
2154 break;
2155 case k_Register:
2156 OS << "<register " << getReg() << '>';
2157 break;
2158 case k_Token:
2159 OS << '\'' << getToken() << '\'';
2160 break;
2161 case k_ShiftExtend:
2162 OS << "<shift: type=" << ShiftExtend.ShiftType
2163 << ", amount=" << ShiftExtend.Amount << ">";
2164 break;
2165 case k_SysReg: {
2166 StringRef Name(SysReg.Data, SysReg.Length);
2167 OS << "<sysreg: " << Name << '>';
2168 break;
2169 }
2170 default:
2171 llvm_unreachable("No idea how to print this kind of operand");
2172 break;
2173 }
2174}
2175
// Dump this operand to the standard error stream; debugger-friendly wrapper
// around print().
void AArch64Operand::dump() const {
  print(errs());
}
2179
2180
/// Force static initialization.
///
/// Registers the AArch64 assembly parser with the target registry so that
/// lookups against TheAArch64Target can construct an AArch64AsmParser.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}
2185
2186#define GET_REGISTER_MATCHER
2187#define GET_MATCHER_IMPLEMENTATION
2188#include "AArch64GenAsmMatcher.inc"