//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These methods form the public interface of the MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over their interface: they
  // return an OperandMatchResultTy because it's less ambiguous than
  // true/false or -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments.
  /// It does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
};

}

namespace {

/// Instances of this class represent a single operand parsed from an
/// AArch64 instruction, including the mnemonic and other raw tokens.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic, plus any raw tokens the auto-generated
                      // matcher needs to see
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  union {
    struct {
      const MCExpr *Val;
      unsigned ShiftAmount;
      bool ImplicitAmount;
    } ImmWithLSL;

    struct {
      A64CC::CondCodes Code;
    } CondCode;

    struct {
      double Val;
    } FPImm;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned RegNum;
    } Reg;

    struct {
      A64SE::ShiftExtSpecifiers ShiftType;
      unsigned Amount;
      bool ImplicitAmount;
    } ShiftExtend;

    struct {
      const char *Data;
      unsigned Length;
    } SysReg;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  // The implicitly-defined copy constructor is sufficient here: Kind, the
  // source locations and the (trivially copyable) union are all copied
  // member-wise.

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
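
  // Illustrative examples (not from the original source): isAddSubImmLSL0
  // accepts the operand of "add x0, x1, #1234" or "add x0, x1, #:lo12:var",
  // while isAddSubImmLSL12 accepts "add x0, x1, #1, lsl #12" or
  // "add x0, x1, #:dtprel_hi12:var, lsl #12".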

  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }
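
  // Example (illustrative): for "ldr w0, [x1, w2, sxtw #2]" the instruction
  // uses MemSize == 4 and RmSize == 32, so SXTW with an amount of
  // Log2_32(4) == 2 is accepted.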

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOT
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr: only bare symbol references and plain constants are
    // accepted as label operands.
    return false;
  }
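
  // Worked example (illustrative): with field_width == 19 and scale == 4, as
  // used for load-literal labels, constants must be multiples of 4 in
  // [-4 * 2^18, 4 * (2^18 - 1)], i.e. [-1MiB, 1MiB - 4].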

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned
    return Val >= 0 && Val <= 0xfff * MemSize;
  }
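
  // Example (illustrative): for "ldr x0, [x1, #8]" MemSize is 8, so the
  // offset must be a multiple of 8 in [0, 0xfff * 8]; the encoded field is
  // the offset divided by 8 (see addOffsetUImm12Operands below).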

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMoveWideImm(unsigned RegWidth,
                     AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }
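
  // Illustrative examples: "movz x0, #0xffff, lsl #32" passes the constant
  // path of isMoveWideImm, while "movn w0, #:dtprel_g0:var" passes the
  // modifier path because its shift amount is left implicit.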

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }
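
  // Example (illustrative): "mov w0, #0x10000" aliases
  // "movz w0, #1, lsl #16", so an isMoveWideMovAlias<32, A64Imms::isMOVZImm>
  // operand accepts it, yielding UImm16 == 1 and Shift == 16.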

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check against the registers known to be readable via MRS.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  template<int MemSize> bool isSImm7Scaled() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }
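
  // Example (illustrative): "ldp x0, x1, [x2, #-512]" uses MemSize == 8, so
  // the offset scales to -64, the most negative value a signed 7-bit field
  // can hold.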

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
        && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }
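
  // Example (illustrative): for "movz x0, #:abs_g2:sym" the expression is
  // added as-is for later relocation and the shift operand becomes 2,
  // standing for "lsl #32".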

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits
    // are as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
  // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: { // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().ParseExpression(ExprVal)
      ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}
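
// Example (illustrative): after a register written with layout ".s"
// (NumLanes == 4), a suffix "[2]" is parsed here into the tokens "[", the
// constant 2, and "]"; "[7]" would be rejected as incompatible with the
// layout.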

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}
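
// Example (illustrative): in "add x0, x0, #:lo12:var", this consumes the
// ":lo12:" prefix and reports VK_AARCH64_LO12; ParseImmediate then parses
// "var" as the sub-expression it wraps.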

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().lower() == "lsl") {
    Parser.Lex();

    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex();

      if (Parser.getTok().isNot(AsmToken::Integer)) {
        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
        return MatchOperand_ParseFail;
      }
    }
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
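
// Example (illustrative): "#20, lsl #12" becomes an ImmWithLSL operand with
// ShiftAmount == 12, while a bare "#20" gets an implicit ShiftAmount of 0.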

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  std::string LowerTok = Parser.getTok().getIdentifier().lower();
  StringRef Tok(LowerTok);
  if (Tok[0] != 'c') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}
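
// Example (illustrative): the "c7" and "c5" operands of
// "sys #0, c7, c5, #0, x0" are parsed here, each becoming a constant
// immediate in the range [0, 15].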

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}
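
// Example (illustrative): "fmov d0, #-1.5" reaches this parser as the tokens
// '#', '-' and the real literal "1.5"; the sign is folded into the stored
// double before the operand is created.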

// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into
    // LowerReg would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    Layout = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(Layout)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}
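
// Example (illustrative, assuming "v0" matches a register name): for the
// token "v0.8b", IdentifyRegister reports v0's register number, sets Layout
// to ".8b", and leaves the token unconsumed for the caller.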

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, the layout is
      // just an element specifier (e.g. ".b"): the register acts on the
      // entire vector, so infer the lane count from the element size.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If
  // relaxing this, beware of unintended consequences: an immediate will be
  // accepted during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as
  // some kind of string: SPSel is valid for two different forms of MSR with
  // two different encodings. There's no collision at the moment, but the
  // potential is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}
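
// Example (illustrative): the address in "stxr w0, x1, [x2]" or
// "stxr w0, x1, [x2, #0]" is accepted here as a wrapped x2; any non-zero
// offset is rejected.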

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
    StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
      .Case("lsl", A64SE::LSL)
      .Case("lsr", A64SE::LSR)
      .Case("asr", A64SE::ASR)
      .Case("ror", A64SE::ROR)
      .Case("uxtb", A64SE::UXTB)
      .Case("uxth", A64SE::UXTH)
      .Case("uxtw", A64SE::UXTW)
      .Case("uxtx", A64SE::UXTX)
      .Case("sxtb", A64SE::SXTB)
      .Case("sxth", A64SE::SXTH)
      .Case("sxtw", A64SE::SXTW)
      .Case("sxtx", A64SE::SXTX)
      .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
      Spec != A64SE::ASR && Spec != A64SE::ROR) {
    // The shift amount can be omitted for the extending versions, but not
    // real shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}
1704
// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps + 1).getImm();

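    // These insert aliases of BFM encode "lsb" and "width" as
    // ImmR == (RegWidth - lsb) % RegWidth and ImmS == width - 1, so the
    // requested field fits in the register exactly when ImmS < ImmR, or when
    // ImmR == 0 (i.e. lsb == 0), where every encodable width is in range.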
    if (ImmR == 0) {
      // Bitfield inserts are only the preferred disassembly if ImmS < ImmR.
      // However, ImmR == 0 is the one case where the insert form is valid
      // syntax even though the bfx form would be used for disassembly:
      // e.g. "sbfiz w0, w0, #0, #1".
      return false;
    } else if (ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps + 1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

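    // The extract aliases encode "lsb" and "width" as ImmR == lsb and
    // ImmS == lsb + width - 1, so the field lies within the register exactly
    // when ImmR <= ImmS < RegWidth.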
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
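  // A '.' in the name introduces a condition code suffix: e.g. "b.eq" is the
  // mnemonic "b" with the condition "eq".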
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.EatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
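    // Condition codes are exactly two characters, so the operand ends at
    // CondCodePos + 3 (one past the second character of the suffix).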
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
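///  (.hword and .xword reuse this with Size == 2 and Size == 8 respectively)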
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// ParseDirectiveTLSDescCall:
//  ::= .tlsdesccall symbol
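// The directive emits no code of its own: it wraps the symbol in a
// TLSDESCCALL pseudo-instruction so that the required relocation can be
// placed at this point in the TLS-descriptor calling sequence.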
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().ParseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                              MCStreamer &Out, unsigned &ErrorInfo,
                              bool MatchingInlineAsm) {
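  // MatchInstructionImpl is tablegen'erated from the instruction definitions
  // (see the GET_MATCHER_IMPLEMENTATION include at the end of this file); on
  // success it selects an opcode and fills in Inst's operands.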
  MCInst Inst;
  unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                              MatchingInlineAsm);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
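    // When the matcher reports an invalid operand, ErrorInfo (if valid) holds
    // the index of the offending entry in Operands.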
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  }

  llvm_unreachable("Implement any new match types added!");
}


void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
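/// Registering with TheAArch64Target lets tools such as llvm-mc look this
/// parser up through the TargetRegistry.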
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"