//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII,
                   const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);
    if (Parser.getStreamer().getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(Parser.getStreamer());

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
} // end anonymous namespace

namespace {

/// AArch64Operand - Instances of this class represent a parsed operand of an
/// AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct ExtendOp {
    unsigned Val;
  };

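  // Discriminated by Kind: exactly one member of this union is live at a time.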
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
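  // Signed load/store offset immediates. isSImm9 covers the 9-bit unscaled
  // offsets used by LDUR/STUR and pre/post-indexed addressing; the isSImm7s*
  // predicates cover the scaled 7-bit offsets used by load/store-pair.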
  bool isSImm9() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val < 256);
  }
  bool isSImm7s4() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
  }
  bool isSImm7s8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
  }
  bool isSImm7s16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

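  // A UImm12 offset is an unsigned, scaled 12-bit offset: e.g. for an 8-byte
  // LDR (Scale == 8), "ldr x0, [x1, #32]" is accepted and later encoded as
  // 32 / 8 == 4 in the 12-bit offset field.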
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  bool isImm0_7() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 8);
  }
  bool isImm1_8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 9);
  }
  bool isImm0_15() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 16);
  }
  bool isImm1_16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 17);
  }
  bool isImm0_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 32);
  }
  bool isImm1_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 32);
  }
  bool isImm1_32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 33);
  }
  bool isImm0_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }
  bool isImm1_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 64);
  }
  bool isImm1_64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 65);
  }
  bool isImm0_127() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 128);
  }
  bool isImm0_255() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 256);
  }
  bool isImm0_65535() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 65536);
  }
  bool isImm32_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 32 && Val < 64);
  }
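  // Logical immediates are the bitmask immediates used by AND/ORR/EOR/TST:
  // AArch64_AM::isLogicalImmediate accepts only values expressible as a
  // repeating, rotated run of contiguous set bits (the N:immr:imms encoding).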
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
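  // Add/sub immediates are 12 bits with an optional "lsl #12", e.g.
  // "add x0, x1, #1, lsl #12" adds 4096; symbolic low-12 relocations such as
  // :lo12: are also accepted here.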
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return Addend == 0;
    }

    return false;
  }

  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
        AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
        AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }

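  // MOV-immediate aliases: e.g. "mov w0, #0x2a0000" is accepted as an alias of
  // "movz w0, #0x2a, lsl #16", and values whose complement fits a single
  // 16-bit chunk map onto MOVN instead.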
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }

  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
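  // e.g. "ldr x0, [x1, #12]" cannot use the scaled 64-bit LDR encoding (12 is
  // not a multiple of 8) but fits the 9-bit unscaled range, so it matches this
  // class and is emitted as "ldur x0, [x1, #12]".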
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

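    // Map the 64-bit register onto its 32-bit counterpart (e.g. X3 -> W3) by
    // looking up the same encoding value in the GPR32 register class.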
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
                                    AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
                                    AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

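  // ADRP operands are page-granular: a resolved constant is scaled down by the
  // 4KiB page size (>> 12) before being encoded.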
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }

  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

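  // Branch targets drop the low two bits when encoded (all AArch64 branch
  // offsets are word-aligned), e.g. a resolved offset of +8 bytes is encoded
  // as 2. Unresolved labels are passed through for the fixup to handle.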
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    uint32_t Bits =
        AArch64PState::PStateMapper().fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }

  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

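  // Memory extend operands are encoded as two immediates: whether the extend
  // is signed (SXTW/SXTX) and whether the offset register is shifted.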
1517 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1518 assert(N == 2 && "Invalid number of operands!");
1519 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1520 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1521 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1522 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1523 }
1524
1525 // For 8-bit load/store instructions with a register offset, both the
1526 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1527 // they're disambiguated by whether the shift was explicit or implicit rather
1528 // than its size.
1529 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1530 assert(N == 2 && "Invalid number of operands!");
1531 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1532 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1533 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1534 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1535 }
1536
1537 template<int Shift>
1538 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1539 assert(N == 1 && "Invalid number of operands!");
1540
1541 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1542 uint64_t Value = CE->getValue();
1543 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1544 }
1545
1546 template<int Shift>
1547 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1548 assert(N == 1 && "Invalid number of operands!");
1549
1550 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1551 uint64_t Value = CE->getValue();
1552 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1553 }
1554
1555 void print(raw_ostream &OS) const override;
1556
David Blaikie960ea3f2014-06-08 16:18:35 +00001557 static std::unique_ptr<AArch64Operand>
1558 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1559 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001560 Op->Tok.Data = Str.data();
1561 Op->Tok.Length = Str.size();
1562 Op->Tok.IsSuffix = IsSuffix;
1563 Op->StartLoc = S;
1564 Op->EndLoc = S;
1565 return Op;
1566 }
1567
David Blaikie960ea3f2014-06-08 16:18:35 +00001568 static std::unique_ptr<AArch64Operand>
1569 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1570 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001571 Op->Reg.RegNum = RegNum;
1572 Op->Reg.isVector = isVector;
1573 Op->StartLoc = S;
1574 Op->EndLoc = E;
1575 return Op;
1576 }
1577
David Blaikie960ea3f2014-06-08 16:18:35 +00001578 static std::unique_ptr<AArch64Operand>
1579 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1580 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1581 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001582 Op->VectorList.RegNum = RegNum;
1583 Op->VectorList.Count = Count;
1584 Op->VectorList.NumElements = NumElements;
1585 Op->VectorList.ElementKind = ElementKind;
1586 Op->StartLoc = S;
1587 Op->EndLoc = E;
1588 return Op;
1589 }
1590
David Blaikie960ea3f2014-06-08 16:18:35 +00001591 static std::unique_ptr<AArch64Operand>
1592 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1593 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001594 Op->VectorIndex.Val = Idx;
1595 Op->StartLoc = S;
1596 Op->EndLoc = E;
1597 return Op;
1598 }
1599
David Blaikie960ea3f2014-06-08 16:18:35 +00001600 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1601 SMLoc E, MCContext &Ctx) {
1602 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001603 Op->Imm.Val = Val;
1604 Op->StartLoc = S;
1605 Op->EndLoc = E;
1606 return Op;
1607 }
1608
David Blaikie960ea3f2014-06-08 16:18:35 +00001609 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1610 unsigned ShiftAmount,
1611 SMLoc S, SMLoc E,
1612 MCContext &Ctx) {
1613 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001614 Op->ShiftedImm.Val = Val;
1615 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1616 Op->StartLoc = S;
1617 Op->EndLoc = E;
1618 return Op;
1619 }
1620
David Blaikie960ea3f2014-06-08 16:18:35 +00001621 static std::unique_ptr<AArch64Operand>
1622 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1623 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001624 Op->CondCode.Code = Code;
1625 Op->StartLoc = S;
1626 Op->EndLoc = E;
1627 return Op;
1628 }
1629
David Blaikie960ea3f2014-06-08 16:18:35 +00001630 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1631 MCContext &Ctx) {
1632 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001633 Op->FPImm.Val = Val;
1634 Op->StartLoc = S;
1635 Op->EndLoc = S;
1636 return Op;
1637 }
1638
David Blaikie960ea3f2014-06-08 16:18:35 +00001639 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1640 MCContext &Ctx) {
1641 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001642 Op->Barrier.Val = Val;
1643 Op->StartLoc = S;
1644 Op->EndLoc = S;
1645 return Op;
1646 }
1647
David Blaikie960ea3f2014-06-08 16:18:35 +00001648 static std::unique_ptr<AArch64Operand>
1649 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1650 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001651 Op->SysReg.Data = Str.data();
1652 Op->SysReg.Length = Str.size();
1653 Op->SysReg.FeatureBits = FeatureBits;
1654 Op->StartLoc = S;
1655 Op->EndLoc = S;
1656 return Op;
1657 }
1658
David Blaikie960ea3f2014-06-08 16:18:35 +00001659 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1660 SMLoc E, MCContext &Ctx) {
1661 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001662 Op->SysCRImm.Val = Val;
1663 Op->StartLoc = S;
1664 Op->EndLoc = E;
1665 return Op;
1666 }
1667
David Blaikie960ea3f2014-06-08 16:18:35 +00001668 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1669 MCContext &Ctx) {
1670 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001671 Op->Prefetch.Val = Val;
1672 Op->StartLoc = S;
1673 Op->EndLoc = S;
1674 return Op;
1675 }
1676
David Blaikie960ea3f2014-06-08 16:18:35 +00001677 static std::unique_ptr<AArch64Operand>
1678 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1679 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
Tim Northover3b0846e2014-05-24 12:50:23 +00001681 Op->ShiftExtend.Type = ShOp;
1682 Op->ShiftExtend.Amount = Val;
1683 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1684 Op->StartLoc = S;
1685 Op->EndLoc = E;
1686 return Op;
1687 }
1688};
1689
1690} // end anonymous namespace.
1691
1692void AArch64Operand::print(raw_ostream &OS) const {
1693 switch (Kind) {
1694 case k_FPImm:
1695 OS << "<fpimm " << getFPImm() << "("
1696 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1697 break;
1698 case k_Barrier: {
1699 bool Valid;
1700 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1701 if (Valid)
1702 OS << "<barrier " << Name << ">";
1703 else
1704 OS << "<barrier invalid #" << getBarrier() << ">";
1705 break;
1706 }
1707 case k_Immediate:
1708 getImm()->print(OS);
1709 break;
1710 case k_ShiftedImm: {
1711 unsigned Shift = getShiftedImmShift();
1712 OS << "<shiftedimm ";
1713 getShiftedImmVal()->print(OS);
1714 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1715 break;
1716 }
1717 case k_CondCode:
1718 OS << "<condcode " << getCondCode() << ">";
1719 break;
1720 case k_Register:
1721 OS << "<register " << getReg() << ">";
1722 break;
1723 case k_VectorList: {
1724 OS << "<vectorlist ";
1725 unsigned Reg = getVectorListStart();
1726 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1727 OS << Reg + i << " ";
1728 OS << ">";
1729 break;
1730 }
1731 case k_VectorIndex:
1732 OS << "<vectorindex " << getVectorIndex() << ">";
1733 break;
1734 case k_SysReg:
1735 OS << "<sysreg: " << getSysReg() << '>';
1736 break;
1737 case k_Token:
1738 OS << "'" << getToken() << "'";
1739 break;
1740 case k_SysCR:
1741 OS << "c" << getSysCR();
1742 break;
1743 case k_Prefetch: {
1744 bool Valid;
1745 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1746 if (Valid)
1747 OS << "<prfop " << Name << ">";
1748 else
1749 OS << "<prfop invalid #" << getPrefetch() << ">";
1750 break;
1751 }
1752 case k_ShiftExtend: {
1753 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1754 << getShiftExtendAmount();
1755 if (!hasShiftExtendAmount())
1756 OS << "<imp>";
1757 OS << '>';
1758 break;
1759 }
1760 }
1761}
1762
1763/// @name Auto-generated Match Functions
1764/// {
1765
1766static unsigned MatchRegisterName(StringRef Name);
1767
1768/// }
1769
1770static unsigned matchVectorRegName(StringRef Name) {
1771 return StringSwitch<unsigned>(Name)
1772 .Case("v0", AArch64::Q0)
1773 .Case("v1", AArch64::Q1)
1774 .Case("v2", AArch64::Q2)
1775 .Case("v3", AArch64::Q3)
1776 .Case("v4", AArch64::Q4)
1777 .Case("v5", AArch64::Q5)
1778 .Case("v6", AArch64::Q6)
1779 .Case("v7", AArch64::Q7)
1780 .Case("v8", AArch64::Q8)
1781 .Case("v9", AArch64::Q9)
1782 .Case("v10", AArch64::Q10)
1783 .Case("v11", AArch64::Q11)
1784 .Case("v12", AArch64::Q12)
1785 .Case("v13", AArch64::Q13)
1786 .Case("v14", AArch64::Q14)
1787 .Case("v15", AArch64::Q15)
1788 .Case("v16", AArch64::Q16)
1789 .Case("v17", AArch64::Q17)
1790 .Case("v18", AArch64::Q18)
1791 .Case("v19", AArch64::Q19)
1792 .Case("v20", AArch64::Q20)
1793 .Case("v21", AArch64::Q21)
1794 .Case("v22", AArch64::Q22)
1795 .Case("v23", AArch64::Q23)
1796 .Case("v24", AArch64::Q24)
1797 .Case("v25", AArch64::Q25)
1798 .Case("v26", AArch64::Q26)
1799 .Case("v27", AArch64::Q27)
1800 .Case("v28", AArch64::Q28)
1801 .Case("v29", AArch64::Q29)
1802 .Case("v30", AArch64::Q30)
1803 .Case("v31", AArch64::Q31)
1804 .Default(0);
1805}
1806
1807static bool isValidVectorKind(StringRef Name) {
1808 return StringSwitch<bool>(Name.lower())
1809 .Case(".8b", true)
1810 .Case(".16b", true)
1811 .Case(".4h", true)
1812 .Case(".8h", true)
1813 .Case(".2s", true)
1814 .Case(".4s", true)
1815 .Case(".1d", true)
1816 .Case(".2d", true)
1817 .Case(".1q", true)
1818 // Accept the width neutral ones, too, for verbose syntax. If those
1819 // aren't used in the right places, the token operand won't match so
1820 // all will work out.
1821 .Case(".b", true)
1822 .Case(".h", true)
1823 .Case(".s", true)
1824 .Case(".d", true)
1825 .Default(false);
1826}
1827
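// For example (illustrative), ".4s" below yields NumElements = 4 and
// ElementKind = 's', while the width-neutral ".b" yields NumElements = 0.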
1828static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1829 char &ElementKind) {
1830 assert(isValidVectorKind(Name));
1831
1832 ElementKind = Name.lower()[Name.size() - 1];
1833 NumElements = 0;
1834
1835 if (Name.size() == 2)
1836 return;
1837
1838 // Parse the lane count
1839 Name = Name.drop_front();
1840 while (isdigit(Name.front())) {
1841 NumElements = 10 * NumElements + (Name.front() - '0');
1842 Name = Name.drop_front();
1843 }
1844}
1845
1846bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1847 SMLoc &EndLoc) {
1848 StartLoc = getLoc();
1849 RegNo = tryParseRegister();
1850 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1851 return (RegNo == (unsigned)-1);
1852}
1853
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00001854// Matches a register name or register alias previously defined by '.req'
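// For example (illustrative), after "foo .req x4" the operand "foo" resolves
// to X4 here.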
1855unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1856 bool isVector) {
1857 unsigned RegNum = isVector ? matchVectorRegName(Name)
1858 : MatchRegisterName(Name);
1859
1860 if (RegNum == 0) {
1861 // Check for aliases registered via .req. Canonicalize to lower case.
1862 // That's more consistent since register names are case insensitive, and
1863 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1864 auto Entry = RegisterReqs.find(Name.lower());
1865 if (Entry == RegisterReqs.end())
1866 return 0;
1867 // set RegNum if the match is the right kind of register
1868 if (isVector == Entry->getValue().first)
1869 RegNum = Entry->getValue().second;
1870 }
1871 return RegNum;
1872}
1873
Tim Northover3b0846e2014-05-24 12:50:23 +00001874/// tryParseRegister - Try to parse a register name. The token must be an
1875/// Identifier when called, and if it is a register name the token is eaten and
1876/// the register is added to the operand list.
1877int AArch64AsmParser::tryParseRegister() {
1878 const AsmToken &Tok = Parser.getTok();
1879 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1880
1881 std::string lowerCase = Tok.getString().lower();
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00001882 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
Tim Northover3b0846e2014-05-24 12:50:23 +00001883 // Also handle a few aliases of registers.
1884 if (RegNum == 0)
1885 RegNum = StringSwitch<unsigned>(lowerCase)
1886 .Case("fp", AArch64::FP)
1887 .Case("lr", AArch64::LR)
1888 .Case("x31", AArch64::XZR)
1889 .Case("w31", AArch64::WZR)
1890 .Default(0);
1891
1892 if (RegNum == 0)
1893 return -1;
1894
1895 Parser.Lex(); // Eat identifier token.
1896 return RegNum;
1897}
1898
1899/// tryMatchVectorRegister - Try to parse a vector register name with optional
1900/// kind specifier. If it is a register specifier, eat the token and return it.
1901int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1902 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1903 TokError("vector register expected");
1904 return -1;
1905 }
1906
1907 StringRef Name = Parser.getTok().getString();
1908 // If there is a kind specifier, it's separated from the register name by
1909 // a '.'.
1910 size_t Start = 0, Next = Name.find('.');
1911 StringRef Head = Name.slice(Start, Next);
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00001912 unsigned RegNum = matchRegisterNameAlias(Head, true);
1913
Tim Northover3b0846e2014-05-24 12:50:23 +00001914 if (RegNum) {
1915 if (Next != StringRef::npos) {
1916 Kind = Name.slice(Next, StringRef::npos);
1917 if (!isValidVectorKind(Kind)) {
1918 TokError("invalid vector kind qualifier");
1919 return -1;
1920 }
1921 }
1922 Parser.Lex(); // Eat the register token.
1923 return RegNum;
1924 }
1925
1926 if (expected)
1927 TokError("vector register expected");
1928 return -1;
1929}
1930
1931/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1932AArch64AsmParser::OperandMatchResultTy
1933AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1934 SMLoc S = getLoc();
1935
1936 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1939 }
1940
1941 StringRef Tok = Parser.getTok().getIdentifier();
1942 if (Tok[0] != 'c' && Tok[0] != 'C') {
1943 Error(S, "Expected cN operand where 0 <= N <= 15");
1944 return MatchOperand_ParseFail;
1945 }
1946
1947 uint32_t CRNum;
1948 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1949 if (BadNum || CRNum > 15) {
1950 Error(S, "Expected cN operand where 0 <= N <= 15");
1951 return MatchOperand_ParseFail;
1952 }
1953
1954 Parser.Lex(); // Eat identifier token.
1955 Operands.push_back(
1956 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1957 return MatchOperand_Success;
1958}
1959
1960/// tryParsePrefetch - Try to parse a prefetch operand.
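/// For example (illustrative), both "prfm pldl1keep, [x0]" and
/// "prfm #0, [x0]" are accepted; named hints map onto the 5-bit prfop field.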
1961AArch64AsmParser::OperandMatchResultTy
1962AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1963 SMLoc S = getLoc();
1964 const AsmToken &Tok = Parser.getTok();
1965 // Either an identifier for named values or a 5-bit immediate.
1966 bool Hash = Tok.is(AsmToken::Hash);
1967 if (Hash || Tok.is(AsmToken::Integer)) {
1968 if (Hash)
1969 Parser.Lex(); // Eat hash token.
1970 const MCExpr *ImmVal;
1971 if (getParser().parseExpression(ImmVal))
1972 return MatchOperand_ParseFail;
1973
1974 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1975 if (!MCE) {
1976 TokError("immediate value expected for prefetch operand");
1977 return MatchOperand_ParseFail;
1978 }
1979 unsigned prfop = MCE->getValue();
1980 if (prfop > 31) {
1981 TokError("prefetch operand out of range, [0,31] expected");
1982 return MatchOperand_ParseFail;
1983 }
1984
1985 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1986 return MatchOperand_Success;
1987 }
1988
1989 if (Tok.isNot(AsmToken::Identifier)) {
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1992 }
1993
1994 bool Valid;
1995 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1996 if (!Valid) {
1997 TokError("pre-fetch hint expected");
1998 return MatchOperand_ParseFail;
1999 }
2000
2001 Parser.Lex(); // Eat identifier token.
2002 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
2003 return MatchOperand_Success;
2004}
2005
2006/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2007/// instruction.
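/// For example (illustrative), "adrp x0, var" gets the implicit page-relative
/// relocation, while "adrp x0, :got:var" selects a GOT page reference.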
2008AArch64AsmParser::OperandMatchResultTy
2009AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2010 SMLoc S = getLoc();
2011 const MCExpr *Expr;
2012
2013 if (Parser.getTok().is(AsmToken::Hash)) {
2014 Parser.Lex(); // Eat hash token.
2015 }
2016
2017 if (parseSymbolicImmVal(Expr))
2018 return MatchOperand_ParseFail;
2019
2020 AArch64MCExpr::VariantKind ELFRefKind;
2021 MCSymbolRefExpr::VariantKind DarwinRefKind;
2022 int64_t Addend;
2023 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2024 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2025 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2026 // No modifier was specified at all; this is the syntax for an ELF basic
2027 // ADRP relocation (unfortunately).
2028 Expr =
2029 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2030 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2031 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2032 Addend != 0) {
2033 Error(S, "gotpage label reference not allowed an addend");
2034 return MatchOperand_ParseFail;
2035 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2036 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2037 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2038 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2039 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2040 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2041 // The operand must be an @page or @gotpage qualified symbolref.
2042 Error(S, "page or gotpage label reference expected");
2043 return MatchOperand_ParseFail;
2044 }
2045 }
2046
2047 // We have either a label reference possibly with addend or an immediate. The
2048 // addend is a raw value here. The linker will adjust it to only reference the
2049 // page.
2050 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2051 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2052
2053 return MatchOperand_Success;
2054}
2055
2056/// tryParseAdrLabel - Parse and validate a source label for the ADR
2057/// instruction.
2058AArch64AsmParser::OperandMatchResultTy
2059AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2060 SMLoc S = getLoc();
2061 const MCExpr *Expr;
2062
2063 if (Parser.getTok().is(AsmToken::Hash)) {
2064 Parser.Lex(); // Eat hash token.
2065 }
2066
2067 if (getParser().parseExpression(Expr))
2068 return MatchOperand_ParseFail;
2069
2070 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2071 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2072
2073 return MatchOperand_Success;
2074}
2075
2076/// tryParseFPImm - A floating point immediate expression operand.
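/// For example (illustrative), "fmov d0, #1.0" is encoded as an 8-bit
/// immediate here, while "#0.0" is deliberately let through for later
/// conversion to the zero register.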
2077AArch64AsmParser::OperandMatchResultTy
2078AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2079 SMLoc S = getLoc();
2080
2081 bool Hash = false;
2082 if (Parser.getTok().is(AsmToken::Hash)) {
2083 Parser.Lex(); // Eat '#'
2084 Hash = true;
2085 }
2086
2087 // Handle negation, as that still comes through as a separate token.
2088 bool isNegative = false;
2089 if (Parser.getTok().is(AsmToken::Minus)) {
2090 isNegative = true;
2091 Parser.Lex();
2092 }
2093 const AsmToken &Tok = Parser.getTok();
2094 if (Tok.is(AsmToken::Real)) {
2095 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2096 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2097 // If we had a '-' in front, toggle the sign bit.
2098 IntVal ^= (uint64_t)isNegative << 63;
2099 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2100 Parser.Lex(); // Eat the token.
2101 // Check for out of range values. As an exception, we let Zero through,
2102 // as we handle that special case in post-processing before matching in
2103 // order to use the zero register for it.
2104 if (Val == -1 && !RealVal.isZero()) {
2105 TokError("expected compatible register or floating-point constant");
2106 return MatchOperand_ParseFail;
2107 }
2108 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2109 return MatchOperand_Success;
2110 }
2111 if (Tok.is(AsmToken::Integer)) {
2112 int64_t Val;
2113 if (!isNegative && Tok.getString().startswith("0x")) {
2114 Val = Tok.getIntVal();
2115 if (Val > 255 || Val < 0) {
2116 TokError("encoded floating point value out of range");
2117 return MatchOperand_ParseFail;
2118 }
2119 } else {
2120 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2121 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2122 // If we had a '-' in front, toggle the sign bit.
2123 IntVal ^= (uint64_t)isNegative << 63;
2124 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2125 }
2126 Parser.Lex(); // Eat the token.
2127 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2128 return MatchOperand_Success;
2129 }
2130
2131 if (!Hash)
2132 return MatchOperand_NoMatch;
2133
2134 TokError("invalid floating point immediate");
2135 return MatchOperand_ParseFail;
2136}
2137
2138/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
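/// For example (illustrative), "add x0, x1, #0x1000" is parsed as immediate
/// #1 with an implicit "lsl #12"; "add x0, x1, #1, lsl #12" spells the shift
/// out explicitly.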
2139AArch64AsmParser::OperandMatchResultTy
2140AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2141 SMLoc S = getLoc();
2142
2143 if (Parser.getTok().is(AsmToken::Hash))
2144 Parser.Lex(); // Eat '#'
2145 else if (Parser.getTok().isNot(AsmToken::Integer))
2146 // Operand should start with '#' or be an integer; otherwise it's not a match.
2147 return MatchOperand_NoMatch;
2148
2149 const MCExpr *Imm;
2150 if (parseSymbolicImmVal(Imm))
2151 return MatchOperand_ParseFail;
2152 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2153 uint64_t ShiftAmount = 0;
2154 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2155 if (MCE) {
2156 int64_t Val = MCE->getValue();
2157 if (Val > 0xfff && (Val & 0xfff) == 0) {
2158 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2159 ShiftAmount = 12;
2160 }
2161 }
2162 SMLoc E = Parser.getTok().getLoc();
2163 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2164 getContext()));
2165 return MatchOperand_Success;
2166 }
2167
2168 // Eat ','
2169 Parser.Lex();
2170
2171 // The optional operand must be "lsl #N" where N is non-negative.
2172 if (!Parser.getTok().is(AsmToken::Identifier) ||
2173 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2174 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2175 return MatchOperand_ParseFail;
2176 }
2177
2178 // Eat 'lsl'
2179 Parser.Lex();
2180
2181 if (Parser.getTok().is(AsmToken::Hash)) {
2182 Parser.Lex();
2183 }
2184
2185 if (Parser.getTok().isNot(AsmToken::Integer)) {
2186 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2187 return MatchOperand_ParseFail;
2188 }
2189
2190 int64_t ShiftAmount = Parser.getTok().getIntVal();
2191
2192 if (ShiftAmount < 0) {
2193 Error(Parser.getTok().getLoc(), "positive shift amount required");
2194 return MatchOperand_ParseFail;
2195 }
2196 Parser.Lex(); // Eat the number
2197
2198 SMLoc E = Parser.getTok().getLoc();
2199 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2200 S, E, getContext()));
2201 return MatchOperand_Success;
2202}
2203
2204/// parseCondCodeString - Parse a Condition Code string.
2205AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2206 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2207 .Case("eq", AArch64CC::EQ)
2208 .Case("ne", AArch64CC::NE)
2209 .Case("cs", AArch64CC::HS)
2210 .Case("hs", AArch64CC::HS)
2211 .Case("cc", AArch64CC::LO)
2212 .Case("lo", AArch64CC::LO)
2213 .Case("mi", AArch64CC::MI)
2214 .Case("pl", AArch64CC::PL)
2215 .Case("vs", AArch64CC::VS)
2216 .Case("vc", AArch64CC::VC)
2217 .Case("hi", AArch64CC::HI)
2218 .Case("ls", AArch64CC::LS)
2219 .Case("ge", AArch64CC::GE)
2220 .Case("lt", AArch64CC::LT)
2221 .Case("gt", AArch64CC::GT)
2222 .Case("le", AArch64CC::LE)
2223 .Case("al", AArch64CC::AL)
2224 .Case("nv", AArch64CC::NV)
2225 .Default(AArch64CC::Invalid);
2226 return CC;
2227}
2228
2229/// parseCondCode - Parse a Condition Code operand.
2230bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2231 bool invertCondCode) {
2232 SMLoc S = getLoc();
2233 const AsmToken &Tok = Parser.getTok();
2234 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2235
2236 StringRef Cond = Tok.getString();
2237 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2238 if (CC == AArch64CC::Invalid)
2239 return TokError("invalid condition code");
2240 Parser.Lex(); // Eat identifier token.
2241
Artyom Skrobov6c8682e2014-06-10 13:11:35 +00002242 if (invertCondCode) {
2243 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2244 return TokError("condition codes AL and NV are invalid for this instruction");
Tim Northover3b0846e2014-05-24 12:50:23 +00002245 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
Artyom Skrobov6c8682e2014-06-10 13:11:35 +00002246 }
Tim Northover3b0846e2014-05-24 12:50:23 +00002247
2248 Operands.push_back(
2249 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2250 return false;
2251}
2252
2253/// tryParseOptionalShiftExtend - Some operands take an optional shift or
2254/// extend argument. Parse them if present.
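/// For example (illustrative), "add x0, x1, x2, lsl #3" carries an explicit
/// amount, while "add x0, x1, w2, uxtw" relies on the implicit #0.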
2255AArch64AsmParser::OperandMatchResultTy
2256AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2257 const AsmToken &Tok = Parser.getTok();
2258 std::string LowerID = Tok.getString().lower();
2259 AArch64_AM::ShiftExtendType ShOp =
2260 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2261 .Case("lsl", AArch64_AM::LSL)
2262 .Case("lsr", AArch64_AM::LSR)
2263 .Case("asr", AArch64_AM::ASR)
2264 .Case("ror", AArch64_AM::ROR)
2265 .Case("msl", AArch64_AM::MSL)
2266 .Case("uxtb", AArch64_AM::UXTB)
2267 .Case("uxth", AArch64_AM::UXTH)
2268 .Case("uxtw", AArch64_AM::UXTW)
2269 .Case("uxtx", AArch64_AM::UXTX)
2270 .Case("sxtb", AArch64_AM::SXTB)
2271 .Case("sxth", AArch64_AM::SXTH)
2272 .Case("sxtw", AArch64_AM::SXTW)
2273 .Case("sxtx", AArch64_AM::SXTX)
2274 .Default(AArch64_AM::InvalidShiftExtend);
2275
2276 if (ShOp == AArch64_AM::InvalidShiftExtend)
2277 return MatchOperand_NoMatch;
2278
2279 SMLoc S = Tok.getLoc();
2280 Parser.Lex();
2281
2282 bool Hash = getLexer().is(AsmToken::Hash);
2283 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2284 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2285 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2286 ShOp == AArch64_AM::MSL) {
2287 // We expect a number here.
2288 TokError("expected #imm after shift specifier");
2289 return MatchOperand_ParseFail;
2290 }
2291
2292 // "extend" type operatoins don't need an immediate, #0 is implicit.
2293 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2294 Operands.push_back(
2295 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2296 return MatchOperand_Success;
2297 }
2298
2299 if (Hash)
2300 Parser.Lex(); // Eat the '#'.
2301
Jim Grosbach57fd2622014-09-23 22:16:02 +00002302 // Make sure we do actually have a number or a parenthesized expression.
2303 SMLoc E = Parser.getTok().getLoc();
2304 if (!Parser.getTok().is(AsmToken::Integer) &&
2305 !Parser.getTok().is(AsmToken::LParen)) {
2306 Error(E, "expected integer shift amount");
Tim Northover3b0846e2014-05-24 12:50:23 +00002307 return MatchOperand_ParseFail;
2308 }
2309
2310 const MCExpr *ImmVal;
2311 if (getParser().parseExpression(ImmVal))
2312 return MatchOperand_ParseFail;
2313
2314 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2315 if (!MCE) {
Jim Grosbach57fd2622014-09-23 22:16:02 +00002316 Error(E, "expected constant '#imm' after shift specifier");
Tim Northover3b0846e2014-05-24 12:50:23 +00002317 return MatchOperand_ParseFail;
2318 }
2319
Jim Grosbach57fd2622014-09-23 22:16:02 +00002320 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Tim Northover3b0846e2014-05-24 12:50:23 +00002321 Operands.push_back(AArch64Operand::CreateShiftExtend(
2322 ShOp, MCE->getValue(), true, S, E, getContext()));
2323 return MatchOperand_Success;
2324}
2325
2326/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2327/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
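/// For example (illustrative), "ic ialluis" is parsed as if written
/// "sys #0, c7, c1, #0", and "dc zva, x0" as "sys #3, c7, c4, #1, x0".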
2328bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2329 OperandVector &Operands) {
2330 if (Name.find('.') != StringRef::npos)
2331 return TokError("invalid operand");
2332
2333 Mnemonic = Name;
2334 Operands.push_back(
2335 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2336
2337 const AsmToken &Tok = Parser.getTok();
2338 StringRef Op = Tok.getString();
2339 SMLoc S = Tok.getLoc();
2340
2341 const MCExpr *Expr = nullptr;
2342
2343#define SYS_ALIAS(op1, Cn, Cm, op2) \
2344 do { \
2345 Expr = MCConstantExpr::Create(op1, getContext()); \
2346 Operands.push_back( \
2347 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2348 Operands.push_back( \
2349 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2350 Operands.push_back( \
2351 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2352 Expr = MCConstantExpr::Create(op2, getContext()); \
2353 Operands.push_back( \
2354 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2355 } while (0)
2356
2357 if (Mnemonic == "ic") {
2358 if (!Op.compare_lower("ialluis")) {
2359 // SYS #0, C7, C1, #0
2360 SYS_ALIAS(0, 7, 1, 0);
2361 } else if (!Op.compare_lower("iallu")) {
2362 // SYS #0, C7, C5, #0
2363 SYS_ALIAS(0, 7, 5, 0);
2364 } else if (!Op.compare_lower("ivau")) {
2365 // SYS #3, C7, C5, #1
2366 SYS_ALIAS(3, 7, 5, 1);
2367 } else {
2368 return TokError("invalid operand for IC instruction");
2369 }
2370 } else if (Mnemonic == "dc") {
2371 if (!Op.compare_lower("zva")) {
2372 // SYS #3, C7, C4, #1
2373 SYS_ALIAS(3, 7, 4, 1);
2374 } else if (!Op.compare_lower("ivac")) {
2375 // SYS #0, C7, C6, #1
2376 SYS_ALIAS(0, 7, 6, 1);
2377 } else if (!Op.compare_lower("isw")) {
2378 // SYS #0, C7, C6, #2
2379 SYS_ALIAS(0, 7, 6, 2);
2380 } else if (!Op.compare_lower("cvac")) {
2381 // SYS #3, C7, C10, #1
2382 SYS_ALIAS(3, 7, 10, 1);
2383 } else if (!Op.compare_lower("csw")) {
2384 // SYS #0, C7, C10, #2
2385 SYS_ALIAS(0, 7, 10, 2);
2386 } else if (!Op.compare_lower("cvau")) {
2387 // SYS #3, C7, C11, #1
2388 SYS_ALIAS(3, 7, 11, 1);
2389 } else if (!Op.compare_lower("civac")) {
2390 // SYS #3, C7, C14, #1
2391 SYS_ALIAS(3, 7, 14, 1);
2392 } else if (!Op.compare_lower("cisw")) {
2393 // SYS #0, C7, C14, #2
2394 SYS_ALIAS(0, 7, 14, 2);
2395 } else {
2396 return TokError("invalid operand for DC instruction");
2397 }
2398 } else if (Mnemonic == "at") {
2399 if (!Op.compare_lower("s1e1r")) {
2400 // SYS #0, C7, C8, #0
2401 SYS_ALIAS(0, 7, 8, 0);
2402 } else if (!Op.compare_lower("s1e2r")) {
2403 // SYS #4, C7, C8, #0
2404 SYS_ALIAS(4, 7, 8, 0);
2405 } else if (!Op.compare_lower("s1e3r")) {
2406 // SYS #6, C7, C8, #0
2407 SYS_ALIAS(6, 7, 8, 0);
2408 } else if (!Op.compare_lower("s1e1w")) {
2409 // SYS #0, C7, C8, #1
2410 SYS_ALIAS(0, 7, 8, 1);
2411 } else if (!Op.compare_lower("s1e2w")) {
2412 // SYS #4, C7, C8, #1
2413 SYS_ALIAS(4, 7, 8, 1);
2414 } else if (!Op.compare_lower("s1e3w")) {
2415 // SYS #6, C7, C8, #1
2416 SYS_ALIAS(6, 7, 8, 1);
2417 } else if (!Op.compare_lower("s1e0r")) {
2418 // SYS #0, C7, C8, #2
2419 SYS_ALIAS(0, 7, 8, 2);
2420 } else if (!Op.compare_lower("s1e0w")) {
2421 // SYS #0, C7, C8, #3
2422 SYS_ALIAS(0, 7, 8, 3);
2423 } else if (!Op.compare_lower("s12e1r")) {
2424 // SYS #4, C7, C8, #4
2425 SYS_ALIAS(4, 7, 8, 4);
2426 } else if (!Op.compare_lower("s12e1w")) {
2427 // SYS #4, C7, C8, #5
2428 SYS_ALIAS(4, 7, 8, 5);
2429 } else if (!Op.compare_lower("s12e0r")) {
2430 // SYS #4, C7, C8, #6
2431 SYS_ALIAS(4, 7, 8, 6);
2432 } else if (!Op.compare_lower("s12e0w")) {
2433 // SYS #4, C7, C8, #7
2434 SYS_ALIAS(4, 7, 8, 7);
2435 } else {
2436 return TokError("invalid operand for AT instruction");
2437 }
2438 } else if (Mnemonic == "tlbi") {
2439 if (!Op.compare_lower("vmalle1is")) {
2440 // SYS #0, C8, C3, #0
2441 SYS_ALIAS(0, 8, 3, 0);
2442 } else if (!Op.compare_lower("alle2is")) {
2443 // SYS #4, C8, C3, #0
2444 SYS_ALIAS(4, 8, 3, 0);
2445 } else if (!Op.compare_lower("alle3is")) {
2446 // SYS #6, C8, C3, #0
2447 SYS_ALIAS(6, 8, 3, 0);
2448 } else if (!Op.compare_lower("vae1is")) {
2449 // SYS #0, C8, C3, #1
2450 SYS_ALIAS(0, 8, 3, 1);
2451 } else if (!Op.compare_lower("vae2is")) {
2452 // SYS #4, C8, C3, #1
2453 SYS_ALIAS(4, 8, 3, 1);
2454 } else if (!Op.compare_lower("vae3is")) {
2455 // SYS #6, C8, C3, #1
2456 SYS_ALIAS(6, 8, 3, 1);
2457 } else if (!Op.compare_lower("aside1is")) {
2458 // SYS #0, C8, C3, #2
2459 SYS_ALIAS(0, 8, 3, 2);
2460 } else if (!Op.compare_lower("vaae1is")) {
2461 // SYS #0, C8, C3, #3
2462 SYS_ALIAS(0, 8, 3, 3);
2463 } else if (!Op.compare_lower("alle1is")) {
2464 // SYS #4, C8, C3, #4
2465 SYS_ALIAS(4, 8, 3, 4);
2466 } else if (!Op.compare_lower("vale1is")) {
2467 // SYS #0, C8, C3, #5
2468 SYS_ALIAS(0, 8, 3, 5);
2469 } else if (!Op.compare_lower("vaale1is")) {
2470 // SYS #0, C8, C3, #7
2471 SYS_ALIAS(0, 8, 3, 7);
2472 } else if (!Op.compare_lower("vmalle1")) {
2473 // SYS #0, C8, C7, #0
2474 SYS_ALIAS(0, 8, 7, 0);
2475 } else if (!Op.compare_lower("alle2")) {
2476 // SYS #4, C8, C7, #0
2477 SYS_ALIAS(4, 8, 7, 0);
2478 } else if (!Op.compare_lower("vale2is")) {
2479 // SYS #4, C8, C3, #5
2480 SYS_ALIAS(4, 8, 3, 5);
2481 } else if (!Op.compare_lower("vale3is")) {
2482 // SYS #6, C8, C3, #5
2483 SYS_ALIAS(6, 8, 3, 5);
2484 } else if (!Op.compare_lower("alle3")) {
2485 // SYS #6, C8, C7, #0
2486 SYS_ALIAS(6, 8, 7, 0);
2487 } else if (!Op.compare_lower("vae1")) {
2488 // SYS #0, C8, C7, #1
2489 SYS_ALIAS(0, 8, 7, 1);
2490 } else if (!Op.compare_lower("vae2")) {
2491 // SYS #4, C8, C7, #1
2492 SYS_ALIAS(4, 8, 7, 1);
2493 } else if (!Op.compare_lower("vae3")) {
2494 // SYS #6, C8, C7, #1
2495 SYS_ALIAS(6, 8, 7, 1);
2496 } else if (!Op.compare_lower("aside1")) {
2497 // SYS #0, C8, C7, #2
2498 SYS_ALIAS(0, 8, 7, 2);
2499 } else if (!Op.compare_lower("vaae1")) {
2500 // SYS #0, C8, C7, #3
2501 SYS_ALIAS(0, 8, 7, 3);
2502 } else if (!Op.compare_lower("alle1")) {
2503 // SYS #4, C8, C7, #4
2504 SYS_ALIAS(4, 8, 7, 4);
2505 } else if (!Op.compare_lower("vale1")) {
2506 // SYS #0, C8, C7, #5
2507 SYS_ALIAS(0, 8, 7, 5);
2508 } else if (!Op.compare_lower("vale2")) {
2509 // SYS #4, C8, C7, #5
2510 SYS_ALIAS(4, 8, 7, 5);
2511 } else if (!Op.compare_lower("vale3")) {
2512 // SYS #6, C8, C7, #5
2513 SYS_ALIAS(6, 8, 7, 5);
2514 } else if (!Op.compare_lower("vaale1")) {
2515 // SYS #0, C8, C7, #7
2516 SYS_ALIAS(0, 8, 7, 7);
2517 } else if (!Op.compare_lower("ipas2e1")) {
2518 // SYS #4, C8, C4, #1
2519 SYS_ALIAS(4, 8, 4, 1);
2520 } else if (!Op.compare_lower("ipas2le1")) {
2521 // SYS #4, C8, C4, #5
2522 SYS_ALIAS(4, 8, 4, 5);
2523 } else if (!Op.compare_lower("ipas2e1is")) {
2524 // SYS #4, C8, C4, #1
2525 SYS_ALIAS(4, 8, 0, 1);
2526 } else if (!Op.compare_lower("ipas2le1is")) {
2527 // SYS #4, C8, C4, #5
2528 SYS_ALIAS(4, 8, 0, 5);
2529 } else if (!Op.compare_lower("vmalls12e1")) {
2530 // SYS #4, C8, C7, #6
2531 SYS_ALIAS(4, 8, 7, 6);
2532 } else if (!Op.compare_lower("vmalls12e1is")) {
2533 // SYS #4, C8, C3, #6
2534 SYS_ALIAS(4, 8, 3, 6);
2535 } else {
2536 return TokError("invalid operand for TLBI instruction");
2537 }
2538 }
2539
2540#undef SYS_ALIAS
2541
2542 Parser.Lex(); // Eat operand.
2543
2544 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2545 bool HasRegister = false;
2546
2547 // Check for the optional register operand.
2548 if (getLexer().is(AsmToken::Comma)) {
2549 Parser.Lex(); // Eat comma.
2550
2551 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2552 return TokError("expected register operand");
2553
2554 HasRegister = true;
2555 }
2556
2557 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2558 Parser.eatToEndOfStatement();
2559 return TokError("unexpected token in argument list");
2560 }
2561
2562 if (ExpectRegister && !HasRegister) {
2563 return TokError("specified " + Mnemonic + " op requires a register");
2564 }
2565 else if (!ExpectRegister && HasRegister) {
2566 return TokError("specified " + Mnemonic + " op does not use a register");
2567 }
2568
2569 Parser.Lex(); // Consume the EndOfStatement
2570 return false;
2571}
2572
2573AArch64AsmParser::OperandMatchResultTy
2574AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2575 const AsmToken &Tok = Parser.getTok();
2576
2577 // Can be either a #imm style literal or an option name
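  // For example (illustrative), "dmb sy" and "dmb #15" name the same barrier.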
2578 bool Hash = Tok.is(AsmToken::Hash);
2579 if (Hash || Tok.is(AsmToken::Integer)) {
2580 // Immediate operand.
2581 if (Hash)
2582 Parser.Lex(); // Eat the '#'
2583 const MCExpr *ImmVal;
2584 SMLoc ExprLoc = getLoc();
2585 if (getParser().parseExpression(ImmVal))
2586 return MatchOperand_ParseFail;
2587 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2588 if (!MCE) {
2589 Error(ExprLoc, "immediate value expected for barrier operand");
2590 return MatchOperand_ParseFail;
2591 }
2592 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2593 Error(ExprLoc, "barrier operand out of range");
2594 return MatchOperand_ParseFail;
2595 }
2596 Operands.push_back(
2597 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2598 return MatchOperand_Success;
2599 }
2600
2601 if (Tok.isNot(AsmToken::Identifier)) {
2602 TokError("invalid operand for instruction");
2603 return MatchOperand_ParseFail;
2604 }
2605
2606 bool Valid;
2607 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2608 if (!Valid) {
2609 TokError("invalid barrier option name");
2610 return MatchOperand_ParseFail;
2611 }
2612
2613 // The only valid named option for ISB is 'sy'
2614 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2615 TokError("'sy' or #imm operand expected");
2616 return MatchOperand_ParseFail;
2617 }
2618
2619 Operands.push_back(
2620 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2621 Parser.Lex(); // Consume the option
2622
2623 return MatchOperand_Success;
2624}
2625
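// Parses a named system register operand, e.g. (illustrative) the "nzcv" in
// "mrs x0, nzcv"; no validation of the name happens at this point.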
2626AArch64AsmParser::OperandMatchResultTy
2627AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2628 const AsmToken &Tok = Parser.getTok();
2629
2630 if (Tok.isNot(AsmToken::Identifier))
2631 return MatchOperand_NoMatch;
2632
2633 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2634 STI.getFeatureBits(), getContext()));
2635 Parser.Lex(); // Eat identifier
2636
2637 return MatchOperand_Success;
2638}
2639
2640/// tryParseVectorRegister - Parse a vector register operand.
2641bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2642 if (Parser.getTok().isNot(AsmToken::Identifier))
2643 return true;
2644
2645 SMLoc S = getLoc();
2646 // Check for a vector register specifier first.
2647 StringRef Kind;
2648 int64_t Reg = tryMatchVectorRegister(Kind, false);
2649 if (Reg == -1)
2650 return true;
2651 Operands.push_back(
2652 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2653 // If there was an explicit qualifier, that goes on as a literal text
2654 // operand.
2655 if (!Kind.empty())
2656 Operands.push_back(
2657 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2658
2659 // If there is an index specifier following the register, parse that too.
2660 if (Parser.getTok().is(AsmToken::LBrac)) {
2661 SMLoc SIdx = getLoc();
2662 Parser.Lex(); // Eat left bracket token.
2663
2664 const MCExpr *ImmVal;
2665 if (getParser().parseExpression(ImmVal))
2666 return false;
2667 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2668 if (!MCE) {
2669 TokError("immediate value expected for vector index");
2670 return false;
2671 }
2672
2673 SMLoc E = getLoc();
2674 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2675 Error(E, "']' expected");
2676 return false;
2677 }
2678
2679 Parser.Lex(); // Eat right bracket token.
2680
2681 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2682 E, getContext()));
2683 }
2684
2685 return false;
2686}
2687
2688/// parseRegister - Parse a non-vector register operand.
2689bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2690 SMLoc S = getLoc();
2691 // Try for a vector register.
2692 if (!tryParseVectorRegister(Operands))
2693 return false;
2694
2695 // Try for a scalar register.
2696 int64_t Reg = tryParseRegister();
2697 if (Reg == -1)
2698 return true;
2699 Operands.push_back(
2700 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2701
2702 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2703 // as a string token in the instruction itself.
2704 if (getLexer().getKind() == AsmToken::LBrac) {
2705 SMLoc LBracS = getLoc();
2706 Parser.Lex();
2707 const AsmToken &Tok = Parser.getTok();
2708 if (Tok.is(AsmToken::Integer)) {
2709 SMLoc IntS = getLoc();
2710 int64_t Val = Tok.getIntVal();
2711 if (Val == 1) {
2712 Parser.Lex();
2713 if (getLexer().getKind() == AsmToken::RBrac) {
2714 SMLoc RBracS = getLoc();
2715 Parser.Lex();
2716 Operands.push_back(
2717 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2718 Operands.push_back(
2719 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2720 Operands.push_back(
2721 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2722 return false;
2723 }
2724 }
2725 }
2726 }
2727
2728 return false;
2729}
2730
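// Parses an immediate that may carry an ELF relocation specifier, e.g.
// (illustrative) the ":lo12:var" in "add x0, x0, :lo12:var".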
2731bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2732 bool HasELFModifier = false;
2733 AArch64MCExpr::VariantKind RefKind;
2734
2735 if (Parser.getTok().is(AsmToken::Colon)) {
2736 Parser.Lex(); // Eat ':'
2737 HasELFModifier = true;
2738
2739 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2740 Error(Parser.getTok().getLoc(),
2741 "expect relocation specifier in operand after ':'");
2742 return true;
2743 }
2744
2745 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2746 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2747 .Case("lo12", AArch64MCExpr::VK_LO12)
2748 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2749 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2750 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2751 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2752 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2753 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2754 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2755 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2756 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2757 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2758 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2759 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2760 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2761 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2762 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2763 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2764 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2765 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2766 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2767 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2768 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2769 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2770 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2771 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2772 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2773 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2774 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2775 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2776 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2777 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2778 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2779 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2780 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2781 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2782 .Default(AArch64MCExpr::VK_INVALID);
2783
2784 if (RefKind == AArch64MCExpr::VK_INVALID) {
2785 Error(Parser.getTok().getLoc(),
2786 "expect relocation specifier in operand after ':'");
2787 return true;
2788 }
2789
2790 Parser.Lex(); // Eat identifier
2791
2792 if (Parser.getTok().isNot(AsmToken::Colon)) {
2793 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2794 return true;
2795 }
2796 Parser.Lex(); // Eat ':'
2797 }
2798
2799 if (getParser().parseExpression(ImmVal))
2800 return true;
2801
2802 if (HasELFModifier)
2803 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2804
2805 return false;
2806}
2807
2808/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
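/// For example (illustrative), "{ v0.4s, v1.4s }" and the range form
/// "{ v0.16b - v3.16b }" are both accepted, optionally followed by an index
/// such as "{ v0.s }[1]".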
2809bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2810 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Brace");
2811 SMLoc S = getLoc();
2812 Parser.Lex(); // Eat left bracket token.
2813 StringRef Kind;
2814 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2815 if (FirstReg == -1)
2816 return true;
2817 int64_t PrevReg = FirstReg;
2818 unsigned Count = 1;
2819
2820 if (Parser.getTok().is(AsmToken::Minus)) {
2821 Parser.Lex(); // Eat the minus.
2822
2823 SMLoc Loc = getLoc();
2824 StringRef NextKind;
2825 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2826 if (Reg == -1)
2827 return true;
2828 // Any kind suffixes must match on all regs in the list.
2829 if (Kind != NextKind)
2830 return Error(Loc, "mismatched register size suffix");
2831
2832 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2833
2834 if (Space == 0 || Space > 3) {
2835 return Error(Loc, "invalid number of vectors");
2836 }
2837
2838 Count += Space;
2839 }
2840 else {
2841 while (Parser.getTok().is(AsmToken::Comma)) {
2842 Parser.Lex(); // Eat the comma token.
2843
2844 SMLoc Loc = getLoc();
2845 StringRef NextKind;
2846 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2847 if (Reg == -1)
2848 return true;
2849 // Any kind suffixes must match on all regs in the list.
2850 if (Kind != NextKind)
2851 return Error(Loc, "mismatched register size suffix");
2852
2853 // Registers must be incremental (with wraparound at 31)
2854 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2855 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2856 return Error(Loc, "registers must be sequential");
2857
2858 PrevReg = Reg;
2859 ++Count;
2860 }
2861 }
2862
2863 if (Parser.getTok().isNot(AsmToken::RCurly))
2864 return Error(getLoc(), "'}' expected");
2865 Parser.Lex(); // Eat the '}' token.
2866
2867 if (Count > 4)
2868 return Error(S, "invalid number of vectors");
2869
2870 unsigned NumElements = 0;
2871 char ElementKind = 0;
2872 if (!Kind.empty())
2873 parseValidVectorKind(Kind, NumElements, ElementKind);
2874
2875 Operands.push_back(AArch64Operand::CreateVectorList(
2876 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2877
2878 // If there is an index specifier following the list, parse that too.
2879 if (Parser.getTok().is(AsmToken::LBrac)) {
2880 SMLoc SIdx = getLoc();
2881 Parser.Lex(); // Eat left bracket token.
2882
2883 const MCExpr *ImmVal;
2884 if (getParser().parseExpression(ImmVal))
2885 return false;
2886 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2887 if (!MCE) {
2888 TokError("immediate value expected for vector index");
2889 return false;
2890 }
2891
2892 SMLoc E = getLoc();
2893 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2894 Error(E, "']' expected");
2895 return false;
2896 }
2897
2898 Parser.Lex(); // Eat right bracket token.
2899
2900 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2901 E, getContext()));
2902 }
2903 return false;
2904}
2905
2906AArch64AsmParser::OperandMatchResultTy
2907AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2908 const AsmToken &Tok = Parser.getTok();
2909 if (!Tok.is(AsmToken::Identifier))
2910 return MatchOperand_NoMatch;
2911
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00002912 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
Tim Northover3b0846e2014-05-24 12:50:23 +00002913
2914 MCContext &Ctx = getContext();
2915 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2916 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2917 return MatchOperand_NoMatch;
2918
2919 SMLoc S = getLoc();
2920 Parser.Lex(); // Eat register
2921
2922 if (Parser.getTok().isNot(AsmToken::Comma)) {
2923 Operands.push_back(
2924 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2925 return MatchOperand_Success;
2926 }
2927 Parser.Lex(); // Eat comma.
2928
2929 if (Parser.getTok().is(AsmToken::Hash))
2930 Parser.Lex(); // Eat hash
2931
2932 if (Parser.getTok().isNot(AsmToken::Integer)) {
2933 Error(getLoc(), "index must be absent or #0");
2934 return MatchOperand_ParseFail;
2935 }
2936
2937 const MCExpr *ImmVal;
2938 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2939 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2940 Error(getLoc(), "index must be absent or #0");
2941 return MatchOperand_ParseFail;
2942 }
2943
2944 Operands.push_back(
2945 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2946 return MatchOperand_Success;
2947}
2948
2949/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
2950/// operand regardless of the mnemonic.
2951bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2952 bool invertCondCode) {
2953 // Check if the current operand has a custom associated parser, if so, try to
2954 // custom parse the operand, or fallback to the general approach.
2955 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2956 if (ResTy == MatchOperand_Success)
2957 return false;
2958 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2959 // there was a match, but an error occurred, in which case, just return that
2960 // the operand parsing failed.
2961 if (ResTy == MatchOperand_ParseFail)
2962 return true;
2963
2964 // Nothing custom, so do general case parsing.
2965 SMLoc S, E;
2966 switch (getLexer().getKind()) {
2967 default: {
2968 SMLoc S = getLoc();
2969 const MCExpr *Expr;
2970 if (parseSymbolicImmVal(Expr))
2971 return Error(S, "invalid operand");
2972
2973 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2974 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2975 return false;
2976 }
2977 case AsmToken::LBrac: {
2978 SMLoc Loc = Parser.getTok().getLoc();
2979 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2980 getContext()));
2981 Parser.Lex(); // Eat '['
2982
2983 // There's no comma after a '[', so we can parse the next operand
2984 // immediately.
2985 return parseOperand(Operands, false, false);
2986 }
2987 case AsmToken::LCurly:
2988 return parseVectorList(Operands);
2989 case AsmToken::Identifier: {
2990 // If we're expecting a Condition Code operand, then just parse that.
2991 if (isCondCode)
2992 return parseCondCode(Operands, invertCondCode);
2993
2994 // If it's a register name, parse it.
2995 if (!parseRegister(Operands))
2996 return false;
2997
2998 // This could be an optional "shift" or "extend" operand.
2999 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3000 // We can only continue if no tokens were eaten.
3001 if (GotShift != MatchOperand_NoMatch)
3002 return GotShift;
3003
3004 // This was not a register so parse other operands that start with an
3005 // identifier (like labels) as expressions and create them as immediates.
3006 const MCExpr *IdVal;
3007 S = getLoc();
3008 if (getParser().parseExpression(IdVal))
3009 return true;
3010
3011 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3012 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3013 return false;
3014 }
3015 case AsmToken::Integer:
3016 case AsmToken::Real:
3017 case AsmToken::Hash: {
3018 // #42 -> immediate.
3019 S = getLoc();
3020 if (getLexer().is(AsmToken::Hash))
3021 Parser.Lex();
3022
3023 // Parse a negative sign
3024 bool isNegative = false;
3025 if (Parser.getTok().is(AsmToken::Minus)) {
3026 isNegative = true;
3027 // We need to consume this token only when we have a Real, otherwise
3028 // we let parseSymbolicImmVal take care of it
3029 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3030 Parser.Lex();
3031 }
3032
3033 // The only Real that should come through here is a literal #0.0 for
3034 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3035 // so convert the value.
3036 const AsmToken &Tok = Parser.getTok();
3037 if (Tok.is(AsmToken::Real)) {
3038 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3039 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3040 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3041 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3042 Mnemonic != "fcmlt")
3043 return TokError("unexpected floating point literal");
3044 else if (IntVal != 0 || isNegative)
3045 return TokError("expected floating-point constant #0.0");
3046 Parser.Lex(); // Eat the token.
3047
3048 Operands.push_back(
3049 AArch64Operand::CreateToken("#0", false, S, getContext()));
3050 Operands.push_back(
3051 AArch64Operand::CreateToken(".0", false, S, getContext()));
3052 return false;
3053 }
3054
3055 const MCExpr *ImmVal;
3056 if (parseSymbolicImmVal(ImmVal))
3057 return true;
3058
3059 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3060 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3061 return false;
3062 }
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003063 case AsmToken::Equal: {
3064 SMLoc Loc = Parser.getTok().getLoc();
3065 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3066 return Error(Loc, "unexpected token in operand");
3067 Parser.Lex(); // Eat '='
3068 const MCExpr *SubExprVal;
3069 if (getParser().parseExpression(SubExprVal))
3070 return true;
3071
David Peixottoae5ba762014-07-18 16:05:14 +00003072 if (Operands.size() < 2 ||
3073 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3074 return true;
3075
3076 bool IsXReg =
3077 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3078 Operands[1]->getReg());
3079
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003080 MCContext& Ctx = getContext();
3081 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3082 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
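    // For example (illustrative), "ldr x0, =0x20000" becomes
    // "movz x0, #2, lsl #16", while "ldr x0, =sym" falls through to the
    // constant-pool path below.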
David Peixottoae5ba762014-07-18 16:05:14 +00003083 if (isa<MCConstantExpr>(SubExprVal)) {
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003084 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3085 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3086 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3087 ShiftAmt += 16;
3088 Imm >>= 16;
3089 }
3090 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3091 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3092 Operands.push_back(AArch64Operand::CreateImm(
3093 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3094 if (ShiftAmt)
3095 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3096 ShiftAmt, true, S, E, Ctx));
3097 return false;
3098 }
David Peixottoae5ba762014-07-18 16:05:14 +00003099 APInt Simm = APInt(64, Imm << ShiftAmt);
3100 // check if the immediate is an unsigned or signed 32-bit int for W regs
3101 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3102 return Error(Loc, "Immediate too large for register");
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003103 }
3104 // If it is a label or an imm that cannot fit in a movz, put it into CP.
David Peixottoae5ba762014-07-18 16:05:14 +00003105 const MCExpr *CPLoc =
3106 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003107 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3108 return false;
3109 }
Tim Northover3b0846e2014-05-24 12:50:23 +00003110 }
3111}
3112
3113/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3114/// operands.
3115bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3116 StringRef Name, SMLoc NameLoc,
3117 OperandVector &Operands) {
3118 Name = StringSwitch<StringRef>(Name.lower())
3119 .Case("beq", "b.eq")
3120 .Case("bne", "b.ne")
3121 .Case("bhs", "b.hs")
3122 .Case("bcs", "b.cs")
3123 .Case("blo", "b.lo")
3124 .Case("bcc", "b.cc")
3125 .Case("bmi", "b.mi")
3126 .Case("bpl", "b.pl")
3127 .Case("bvs", "b.vs")
3128 .Case("bvc", "b.vc")
3129 .Case("bhi", "b.hi")
3130 .Case("bls", "b.ls")
3131 .Case("bge", "b.ge")
3132 .Case("blt", "b.lt")
3133 .Case("bgt", "b.gt")
3134 .Case("ble", "b.le")
3135 .Case("bal", "b.al")
3136 .Case("bnv", "b.nv")
3137 .Default(Name);
3138
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00003139 // First check for the AArch64-specific .req directive.
3140 if (Parser.getTok().is(AsmToken::Identifier) &&
3141 Parser.getTok().getIdentifier() == ".req") {
3142 parseDirectiveReq(Name, NameLoc);
3143 // We always return 'error' for this, as we're done with this
3144 // statement and don't need to match the instruction.
3145 return true;
3146 }
3147
Tim Northover3b0846e2014-05-24 12:50:23 +00003148 // Create the leading tokens for the mnemonic, split by '.' characters.
3149 size_t Start = 0, Next = Name.find('.');
3150 StringRef Head = Name.slice(Start, Next);
3151
3152 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3153 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3154 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3155 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3156 Parser.eatToEndOfStatement();
3157 return IsError;
3158 }
3159
3160 Operands.push_back(
3161 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3162 Mnemonic = Head;
3163
3164 // Handle condition codes for a branch mnemonic
3165 if (Head == "b" && Next != StringRef::npos) {
3166 Start = Next;
3167 Next = Name.find('.', Start + 1);
3168 Head = Name.slice(Start + 1, Next);
3169
3170 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3171 (Head.data() - Name.data()));
3172 AArch64CC::CondCode CC = parseCondCodeString(Head);
3173 if (CC == AArch64CC::Invalid)
3174 return Error(SuffixLoc, "invalid condition code");
3175 Operands.push_back(
3176 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3177 Operands.push_back(
3178 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3179 }
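  // So, for example, "b.eq" ends up as the token "b", a suffix token ".", and
  // an EQ condition-code operand.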
3180
3181 // Add the remaining tokens in the mnemonic.
3182 while (Next != StringRef::npos) {
3183 Start = Next;
3184 Next = Name.find('.', Start + 1);
3185 Head = Name.slice(Start, Next);
3186 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3187 (Head.data() - Name.data()) + 1);
3188 Operands.push_back(
3189 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3190 }
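  // For example, a short-form NEON mnemonic such as "fadd.2s" becomes the
  // token "fadd" plus the suffix token ".2s".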
3191
3192 // Conditional compare instructions have a Condition Code operand, which needs
3193 // to be parsed and an immediate operand created.
3194 bool condCodeFourthOperand =
3195 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3196 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3197 Head == "csinc" || Head == "csinv" || Head == "csneg");
3198
3199 // These instructions are aliases to some of the conditional select
3200 // instructions. However, the condition code is inverted in the aliased
3201 // instruction.
3202 //
3203 // FIXME: Is this the correct way to handle these? Or should the parser
3204 // generate the aliased instructions directly?
3205 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3206 bool condCodeThirdOperand =
3207 (Head == "cinc" || Head == "cinv" || Head == "cneg");
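  // For example, in "cset w0, eq" the condition (operand 2) is parsed with
  // invertCondCode set, since the underlying csinc form wants the inverse.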
3208
3209 // Read the remaining operands.
3210 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3211 // Read the first operand.
3212 if (parseOperand(Operands, false, false)) {
3213 Parser.eatToEndOfStatement();
3214 return true;
3215 }
3216
3217 unsigned N = 2;
3218 while (getLexer().is(AsmToken::Comma)) {
3219 Parser.Lex(); // Eat the comma.
3220
3221 // Parse and remember the operand.
3222 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3223 (N == 3 && condCodeThirdOperand) ||
3224 (N == 2 && condCodeSecondOperand),
3225 condCodeSecondOperand || condCodeThirdOperand)) {
3226 Parser.eatToEndOfStatement();
3227 return true;
3228 }
3229
3230 // After successfully parsing some operands there are two special cases to
3231 // consider (i.e. notional operands not separated by commas). Both are due
3232 // to memory specifiers:
3233 // + An RBrac will end an address for load/store/prefetch
3234 // + An '!' will indicate a pre-indexed operation.
3235 //
3236 // It's someone else's responsibility to make sure these tokens are sane
3237 // in the given context!
3238 if (Parser.getTok().is(AsmToken::RBrac)) {
3239 SMLoc Loc = Parser.getTok().getLoc();
3240 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3241 getContext()));
3242 Parser.Lex();
3243 }
3244
3245 if (Parser.getTok().is(AsmToken::Exclaim)) {
3246 SMLoc Loc = Parser.getTok().getLoc();
3247 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3248 getContext()));
3249 Parser.Lex();
3250 }
3251
3252 ++N;
3253 }
3254 }
3255
3256 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3257 SMLoc Loc = Parser.getTok().getLoc();
3258 Parser.eatToEndOfStatement();
3259 return Error(Loc, "unexpected token in argument list");
3260 }
3261
3262 Parser.Lex(); // Consume the EndOfStatement
3263 return false;
3264}
3265
3266// FIXME: This entire function is a giant hack to provide us with decent
3267// operand range validation/diagnostics until TableGen/MC can be extended
3268// to support autogeneration of this kind of validation.
3269bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3270 SmallVectorImpl<SMLoc> &Loc) {
3271 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3272 // Check for indexed addressing modes w/ the base register being the
3273 // same as a destination/source register or pair load where
3274  // the Rt == Rt2. All of those are architecturally unpredictable.
3275 switch (Inst.getOpcode()) {
3276 case AArch64::LDPSWpre:
3277 case AArch64::LDPWpost:
3278 case AArch64::LDPWpre:
3279 case AArch64::LDPXpost:
3280 case AArch64::LDPXpre: {
3281 unsigned Rt = Inst.getOperand(1).getReg();
3282 unsigned Rt2 = Inst.getOperand(2).getReg();
3283 unsigned Rn = Inst.getOperand(3).getReg();
3284 if (RI->isSubRegisterEq(Rn, Rt))
3285 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3286 "is also a destination");
3287 if (RI->isSubRegisterEq(Rn, Rt2))
3288 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3289 "is also a destination");
3290 // FALLTHROUGH
3291 }
3292 case AArch64::LDPDi:
3293 case AArch64::LDPQi:
3294 case AArch64::LDPSi:
3295 case AArch64::LDPSWi:
3296 case AArch64::LDPWi:
3297 case AArch64::LDPXi: {
3298 unsigned Rt = Inst.getOperand(0).getReg();
3299 unsigned Rt2 = Inst.getOperand(1).getReg();
3300 if (Rt == Rt2)
3301 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3302 break;
3303 }
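  // e.g. "ldp x0, x0, [x1]" is diagnosed here because Rt and Rt2 are equal.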
3304 case AArch64::LDPDpost:
3305 case AArch64::LDPDpre:
3306 case AArch64::LDPQpost:
3307 case AArch64::LDPQpre:
3308 case AArch64::LDPSpost:
3309 case AArch64::LDPSpre:
3310 case AArch64::LDPSWpost: {
3311 unsigned Rt = Inst.getOperand(1).getReg();
3312 unsigned Rt2 = Inst.getOperand(2).getReg();
3313 if (Rt == Rt2)
3314 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3315 break;
3316 }
3317 case AArch64::STPDpost:
3318 case AArch64::STPDpre:
3319 case AArch64::STPQpost:
3320 case AArch64::STPQpre:
3321 case AArch64::STPSpost:
3322 case AArch64::STPSpre:
3323 case AArch64::STPWpost:
3324 case AArch64::STPWpre:
3325 case AArch64::STPXpost:
3326 case AArch64::STPXpre: {
3327 unsigned Rt = Inst.getOperand(1).getReg();
3328 unsigned Rt2 = Inst.getOperand(2).getReg();
3329 unsigned Rn = Inst.getOperand(3).getReg();
3330 if (RI->isSubRegisterEq(Rn, Rt))
3331 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3332 "is also a source");
3333 if (RI->isSubRegisterEq(Rn, Rt2))
3334 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3335 "is also a source");
3336 break;
3337 }
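  // e.g. "stp x0, x1, [x0, #16]!" is diagnosed because the writeback base x0
  // is also one of the stored registers.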
3338 case AArch64::LDRBBpre:
3339 case AArch64::LDRBpre:
3340 case AArch64::LDRHHpre:
3341 case AArch64::LDRHpre:
3342 case AArch64::LDRSBWpre:
3343 case AArch64::LDRSBXpre:
3344 case AArch64::LDRSHWpre:
3345 case AArch64::LDRSHXpre:
3346 case AArch64::LDRSWpre:
3347 case AArch64::LDRWpre:
3348 case AArch64::LDRXpre:
3349 case AArch64::LDRBBpost:
3350 case AArch64::LDRBpost:
3351 case AArch64::LDRHHpost:
3352 case AArch64::LDRHpost:
3353 case AArch64::LDRSBWpost:
3354 case AArch64::LDRSBXpost:
3355 case AArch64::LDRSHWpost:
3356 case AArch64::LDRSHXpost:
3357 case AArch64::LDRSWpost:
3358 case AArch64::LDRWpost:
3359 case AArch64::LDRXpost: {
3360 unsigned Rt = Inst.getOperand(1).getReg();
3361 unsigned Rn = Inst.getOperand(2).getReg();
3362 if (RI->isSubRegisterEq(Rn, Rt))
3363 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3364 "is also a source");
3365 break;
3366 }
3367 case AArch64::STRBBpost:
3368 case AArch64::STRBpost:
3369 case AArch64::STRHHpost:
3370 case AArch64::STRHpost:
3371 case AArch64::STRWpost:
3372 case AArch64::STRXpost:
3373 case AArch64::STRBBpre:
3374 case AArch64::STRBpre:
3375 case AArch64::STRHHpre:
3376 case AArch64::STRHpre:
3377 case AArch64::STRWpre:
3378 case AArch64::STRXpre: {
3379 unsigned Rt = Inst.getOperand(1).getReg();
3380 unsigned Rn = Inst.getOperand(2).getReg();
3381 if (RI->isSubRegisterEq(Rn, Rt))
3382 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3383 "is also a source");
3384 break;
3385 }
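  // e.g. "str x0, [x0, #8]!" or "ldr x1, [x1], #4" are diagnosed above because
  // the writeback base overlaps the transfer register.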
3386 }
3387
3388 // Now check immediate ranges. Separate from the above as there is overlap
3389 // in the instructions being checked and this keeps the nested conditionals
3390 // to a minimum.
3391 switch (Inst.getOpcode()) {
3392 case AArch64::ADDSWri:
3393 case AArch64::ADDSXri:
3394 case AArch64::ADDWri:
3395 case AArch64::ADDXri:
3396 case AArch64::SUBSWri:
3397 case AArch64::SUBSXri:
3398 case AArch64::SUBWri:
3399 case AArch64::SUBXri: {
3400 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3401 // some slight duplication here.
3402 if (Inst.getOperand(2).isExpr()) {
3403 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3404 AArch64MCExpr::VariantKind ELFRefKind;
3405 MCSymbolRefExpr::VariantKind DarwinRefKind;
3406 int64_t Addend;
3407 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3408 return Error(Loc[2], "invalid immediate expression");
3409 }
3410
3411 // Only allow these with ADDXri.
3412 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3413 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3414 Inst.getOpcode() == AArch64::ADDXri)
3415 return false;
3416
3417 // Only allow these with ADDXri/ADDWri
3418 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3419 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3420 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3421 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3422 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3423 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3424 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3425 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3426 (Inst.getOpcode() == AArch64::ADDXri ||
3427 Inst.getOpcode() == AArch64::ADDWri))
3428 return false;
3429
3430 // Don't allow expressions in the immediate field otherwise
3431 return Error(Loc[2], "invalid immediate expression");
3432 }
3433 return false;
3434 }
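  // For example, "add x0, x1, :lo12:sym" passes this check, while other
  // relocation specifiers on the immediate fall through to the error above.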
3435 default:
3436 return false;
3437 }
3438}
3439
3440bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3441 switch (ErrCode) {
3442 case Match_MissingFeature:
3443 return Error(Loc,
3444 "instruction requires a CPU feature not currently enabled");
3445 case Match_InvalidOperand:
3446 return Error(Loc, "invalid operand for instruction");
3447 case Match_InvalidSuffix:
3448 return Error(Loc, "invalid type suffix for instruction");
3449 case Match_InvalidCondCode:
3450 return Error(Loc, "expected AArch64 condition code");
3451 case Match_AddSubRegExtendSmall:
3452 return Error(Loc,
3453 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3454 case Match_AddSubRegExtendLarge:
3455 return Error(Loc,
3456 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3457 case Match_AddSubSecondSource:
3458 return Error(Loc,
3459 "expected compatible register, symbol or integer in range [0, 4095]");
3460 case Match_LogicalSecondSource:
3461 return Error(Loc, "expected compatible register or logical immediate");
3462 case Match_InvalidMovImm32Shift:
3463 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3464 case Match_InvalidMovImm64Shift:
3465 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3466 case Match_AddSubRegShift32:
3467 return Error(Loc,
3468 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3469 case Match_AddSubRegShift64:
3470 return Error(Loc,
3471 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3472 case Match_InvalidFPImm:
3473 return Error(Loc,
3474 "expected compatible register or floating-point constant");
3475 case Match_InvalidMemoryIndexedSImm9:
3476 return Error(Loc, "index must be an integer in range [-256, 255].");
3477 case Match_InvalidMemoryIndexed4SImm7:
3478 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3479 case Match_InvalidMemoryIndexed8SImm7:
3480 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3481 case Match_InvalidMemoryIndexed16SImm7:
3482 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3483 case Match_InvalidMemoryWExtend8:
3484 return Error(Loc,
3485 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3486 case Match_InvalidMemoryWExtend16:
3487 return Error(Loc,
3488 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3489 case Match_InvalidMemoryWExtend32:
3490 return Error(Loc,
3491 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3492 case Match_InvalidMemoryWExtend64:
3493 return Error(Loc,
3494 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3495 case Match_InvalidMemoryWExtend128:
3496 return Error(Loc,
3497 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3498 case Match_InvalidMemoryXExtend8:
3499 return Error(Loc,
3500 "expected 'lsl' or 'sxtx' with optional shift of #0");
3501 case Match_InvalidMemoryXExtend16:
3502 return Error(Loc,
3503 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3504 case Match_InvalidMemoryXExtend32:
3505 return Error(Loc,
3506 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3507 case Match_InvalidMemoryXExtend64:
3508 return Error(Loc,
3509 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3510 case Match_InvalidMemoryXExtend128:
3511 return Error(Loc,
3512 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3513 case Match_InvalidMemoryIndexed1:
3514 return Error(Loc, "index must be an integer in range [0, 4095].");
3515 case Match_InvalidMemoryIndexed2:
3516 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3517 case Match_InvalidMemoryIndexed4:
3518 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3519 case Match_InvalidMemoryIndexed8:
3520 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3521 case Match_InvalidMemoryIndexed16:
3522 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3523 case Match_InvalidImm0_7:
3524 return Error(Loc, "immediate must be an integer in range [0, 7].");
3525 case Match_InvalidImm0_15:
3526 return Error(Loc, "immediate must be an integer in range [0, 15].");
3527 case Match_InvalidImm0_31:
3528 return Error(Loc, "immediate must be an integer in range [0, 31].");
3529 case Match_InvalidImm0_63:
3530 return Error(Loc, "immediate must be an integer in range [0, 63].");
3531 case Match_InvalidImm0_127:
3532 return Error(Loc, "immediate must be an integer in range [0, 127].");
3533 case Match_InvalidImm0_65535:
3534 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3535 case Match_InvalidImm1_8:
3536 return Error(Loc, "immediate must be an integer in range [1, 8].");
3537 case Match_InvalidImm1_16:
3538 return Error(Loc, "immediate must be an integer in range [1, 16].");
3539 case Match_InvalidImm1_32:
3540 return Error(Loc, "immediate must be an integer in range [1, 32].");
3541 case Match_InvalidImm1_64:
3542 return Error(Loc, "immediate must be an integer in range [1, 64].");
3543 case Match_InvalidIndex1:
3544 return Error(Loc, "expected lane specifier '[1]'");
3545 case Match_InvalidIndexB:
3546 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3547 case Match_InvalidIndexH:
3548 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3549 case Match_InvalidIndexS:
3550 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3551 case Match_InvalidIndexD:
3552 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3553 case Match_InvalidLabel:
3554 return Error(Loc, "expected label or encodable integer pc offset");
3555 case Match_MRS:
3556 return Error(Loc, "expected readable system register");
3557 case Match_MSR:
3558 return Error(Loc, "expected writable system register or pstate");
3559 case Match_MnemonicFail:
3560 return Error(Loc, "unrecognized instruction mnemonic");
3561 default:
Craig Topper35b2f752014-06-19 06:10:58 +00003562 llvm_unreachable("unexpected error code!");
Tim Northover3b0846e2014-05-24 12:50:23 +00003563 }
3564}
3565
Tim Northover26bb14e2014-08-18 11:49:42 +00003566static const char *getSubtargetFeatureName(uint64_t Val);
Tim Northover3b0846e2014-05-24 12:50:23 +00003567
3568bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3569 OperandVector &Operands,
3570 MCStreamer &Out,
Tim Northover26bb14e2014-08-18 11:49:42 +00003571 uint64_t &ErrorInfo,
Tim Northover3b0846e2014-05-24 12:50:23 +00003572 bool MatchingInlineAsm) {
3573 assert(!Operands.empty() && "Unexpect empty operand list!");
David Blaikie960ea3f2014-06-08 16:18:35 +00003574 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3575 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
Tim Northover3b0846e2014-05-24 12:50:23 +00003576
David Blaikie960ea3f2014-06-08 16:18:35 +00003577 StringRef Tok = Op.getToken();
Tim Northover3b0846e2014-05-24 12:50:23 +00003578 unsigned NumOperands = Operands.size();
3579
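  // The "lsl Rd, Rn, #imm" alias is rewritten to UBFM below; e.g. per the
  // arithmetic here, "lsl x0, x1, #4" becomes "ubfm x0, x1, #60, #59".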
3580 if (NumOperands == 4 && Tok == "lsl") {
David Blaikie960ea3f2014-06-08 16:18:35 +00003581 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3582 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3583 if (Op2.isReg() && Op3.isImm()) {
3584 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
Tim Northover3b0846e2014-05-24 12:50:23 +00003585 if (Op3CE) {
3586 uint64_t Op3Val = Op3CE->getValue();
3587 uint64_t NewOp3Val = 0;
3588 uint64_t NewOp4Val = 0;
3589 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003590 Op2.getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00003591 NewOp3Val = (32 - Op3Val) & 0x1f;
3592 NewOp4Val = 31 - Op3Val;
3593 } else {
3594 NewOp3Val = (64 - Op3Val) & 0x3f;
3595 NewOp4Val = 63 - Op3Val;
3596 }
3597
3598 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3599 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3600
3601 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003602 "ubfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003603 Operands.push_back(AArch64Operand::CreateImm(
David Blaikie960ea3f2014-06-08 16:18:35 +00003604 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3605 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3606 Op3.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003607 }
3608 }
3609 } else if (NumOperands == 5) {
3610 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3611 // UBFIZ -> UBFM aliases.
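    // e.g. "bfi x0, x1, #8, #16" is rewritten by the code below to
    // "bfm x0, x1, #56, #15" ((64 - lsb) & 0x3f and width - 1).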
3612 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
David Blaikie960ea3f2014-06-08 16:18:35 +00003613 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3614 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3615 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
Tim Northover3b0846e2014-05-24 12:50:23 +00003616
David Blaikie960ea3f2014-06-08 16:18:35 +00003617 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3618 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3619 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
Tim Northover3b0846e2014-05-24 12:50:23 +00003620
3621 if (Op3CE && Op4CE) {
3622 uint64_t Op3Val = Op3CE->getValue();
3623 uint64_t Op4Val = Op4CE->getValue();
3624
3625 uint64_t RegWidth = 0;
3626 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003627 Op1.getReg()))
Tim Northover3b0846e2014-05-24 12:50:23 +00003628 RegWidth = 64;
3629 else
3630 RegWidth = 32;
3631
3632 if (Op3Val >= RegWidth)
David Blaikie960ea3f2014-06-08 16:18:35 +00003633 return Error(Op3.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003634                         "expected integer in range [0, " + Twine(RegWidth - 1) + "]");
3635 if (Op4Val < 1 || Op4Val > RegWidth)
David Blaikie960ea3f2014-06-08 16:18:35 +00003636 return Error(Op4.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003637                         "expected integer in range [1, " + Twine(RegWidth) + "]");
3638
3639 uint64_t NewOp3Val = 0;
3640 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003641 Op1.getReg()))
Tim Northover3b0846e2014-05-24 12:50:23 +00003642 NewOp3Val = (32 - Op3Val) & 0x1f;
3643 else
3644 NewOp3Val = (64 - Op3Val) & 0x3f;
3645
3646 uint64_t NewOp4Val = Op4Val - 1;
3647
3648 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
David Blaikie960ea3f2014-06-08 16:18:35 +00003649 return Error(Op4.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003650 "requested insert overflows register");
3651
3652 const MCExpr *NewOp3 =
3653 MCConstantExpr::Create(NewOp3Val, getContext());
3654 const MCExpr *NewOp4 =
3655 MCConstantExpr::Create(NewOp4Val, getContext());
3656 Operands[3] = AArch64Operand::CreateImm(
David Blaikie960ea3f2014-06-08 16:18:35 +00003657 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003658 Operands[4] = AArch64Operand::CreateImm(
David Blaikie960ea3f2014-06-08 16:18:35 +00003659 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003660 if (Tok == "bfi")
3661 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003662 "bfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003663 else if (Tok == "sbfiz")
3664 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003665 "sbfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003666 else if (Tok == "ubfiz")
3667 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003668 "ubfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003669 else
3670 llvm_unreachable("No valid mnemonic for alias?");
Tim Northover3b0846e2014-05-24 12:50:23 +00003671 }
3672 }
3673
3674 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3675 // UBFX -> UBFM aliases.
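    // e.g. "ubfx x0, x1, #8, #16" is rewritten by the code below to
    // "ubfm x0, x1, #8, #23" (imms becomes lsb + width - 1).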
3676 } else if (NumOperands == 5 &&
3677 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
David Blaikie960ea3f2014-06-08 16:18:35 +00003678 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3679 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3680 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
Tim Northover3b0846e2014-05-24 12:50:23 +00003681
David Blaikie960ea3f2014-06-08 16:18:35 +00003682 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3683 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3684 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
Tim Northover3b0846e2014-05-24 12:50:23 +00003685
3686 if (Op3CE && Op4CE) {
3687 uint64_t Op3Val = Op3CE->getValue();
3688 uint64_t Op4Val = Op4CE->getValue();
3689
3690 uint64_t RegWidth = 0;
3691 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003692 Op1.getReg()))
Tim Northover3b0846e2014-05-24 12:50:23 +00003693 RegWidth = 64;
3694 else
3695 RegWidth = 32;
3696
3697 if (Op3Val >= RegWidth)
David Blaikie960ea3f2014-06-08 16:18:35 +00003698 return Error(Op3.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003699                         "expected integer in range [0, " + Twine(RegWidth - 1) + "]");
3700 if (Op4Val < 1 || Op4Val > RegWidth)
David Blaikie960ea3f2014-06-08 16:18:35 +00003701 return Error(Op4.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003702                         "expected integer in range [1, " + Twine(RegWidth) + "]");
3703
3704 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3705
3706 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
David Blaikie960ea3f2014-06-08 16:18:35 +00003707 return Error(Op4.getStartLoc(),
Tim Northover3b0846e2014-05-24 12:50:23 +00003708 "requested extract overflows register");
3709
3710 const MCExpr *NewOp4 =
3711 MCConstantExpr::Create(NewOp4Val, getContext());
3712 Operands[4] = AArch64Operand::CreateImm(
David Blaikie960ea3f2014-06-08 16:18:35 +00003713 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003714 if (Tok == "bfxil")
3715 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003716 "bfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003717 else if (Tok == "sbfx")
3718 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003719 "sbfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003720 else if (Tok == "ubfx")
3721 Operands[0] = AArch64Operand::CreateToken(
David Blaikie960ea3f2014-06-08 16:18:35 +00003722 "ubfm", false, Op.getStartLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003723 else
3724 llvm_unreachable("No valid mnemonic for alias?");
Tim Northover3b0846e2014-05-24 12:50:23 +00003725 }
3726 }
3727 }
3728 }
3729 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3730 // InstAlias can't quite handle this since the reg classes aren't
3731 // subclasses.
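  // e.g. for "sxtw x0, w1" the source w1 is rewritten to x1 below so the
  // GPR64-expecting matcher accepts it.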
3732 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3733 // The source register can be Wn here, but the matcher expects a
3734 // GPR64. Twiddle it here if necessary.
David Blaikie960ea3f2014-06-08 16:18:35 +00003735 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3736 if (Op.isReg()) {
3737 unsigned Reg = getXRegFromWReg(Op.getReg());
3738 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3739 Op.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003740 }
3741 }
3742  // FIXME: Likewise for sxt[bh] with an Xd dst operand
3743 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
David Blaikie960ea3f2014-06-08 16:18:35 +00003744 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3745 if (Op.isReg() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00003746 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003747 Op.getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00003748 // The source register can be Wn here, but the matcher expects a
3749 // GPR64. Twiddle it here if necessary.
David Blaikie960ea3f2014-06-08 16:18:35 +00003750 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3751 if (Op.isReg()) {
3752 unsigned Reg = getXRegFromWReg(Op.getReg());
3753 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3754 Op.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003755 }
3756 }
3757 }
3758  // FIXME: Likewise for uxt[bh] with an Xd dst operand
3759 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
David Blaikie960ea3f2014-06-08 16:18:35 +00003760 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3761 if (Op.isReg() &&
Tim Northover3b0846e2014-05-24 12:50:23 +00003762 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003763 Op.getReg())) {
Tim Northover3b0846e2014-05-24 12:50:23 +00003764 // The source register can be Wn here, but the matcher expects a
3765 // GPR32. Twiddle it here if necessary.
David Blaikie960ea3f2014-06-08 16:18:35 +00003766 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3767 if (Op.isReg()) {
3768 unsigned Reg = getWRegFromXReg(Op.getReg());
3769 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3770 Op.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003771 }
3772 }
3773 }
3774
3775 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
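  // e.g. "fmov s0, #0.0" is turned into "fmov s0, wzr" and "fmov d0, #0.0"
  // into "fmov d0, xzr" by the substitution below.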
3776 if (NumOperands == 3 && Tok == "fmov") {
David Blaikie960ea3f2014-06-08 16:18:35 +00003777 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3778 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3779 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
Tim Northover3b0846e2014-05-24 12:50:23 +00003780 unsigned zreg =
3781 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
David Blaikie960ea3f2014-06-08 16:18:35 +00003782 RegOp.getReg())
Tim Northover3b0846e2014-05-24 12:50:23 +00003783 ? AArch64::WZR
3784 : AArch64::XZR;
David Blaikie960ea3f2014-06-08 16:18:35 +00003785 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3786 Op.getEndLoc(), getContext());
Tim Northover3b0846e2014-05-24 12:50:23 +00003787 }
3788 }
3789
3790 MCInst Inst;
3791 // First try to match against the secondary set of tables containing the
3792 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3793 unsigned MatchResult =
3794 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3795
3796 // If that fails, try against the alternate table containing long-form NEON:
3797 // "fadd v0.2s, v1.2s, v2.2s"
3798 if (MatchResult != Match_Success)
3799 MatchResult =
3800 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3801
3802 switch (MatchResult) {
3803 case Match_Success: {
3804 // Perform range checking and other semantic validations
3805 SmallVector<SMLoc, 8> OperandLocs;
3806 NumOperands = Operands.size();
3807 for (unsigned i = 1; i < NumOperands; ++i)
3808 OperandLocs.push_back(Operands[i]->getStartLoc());
3809 if (validateInstruction(Inst, OperandLocs))
3810 return true;
3811
3812 Inst.setLoc(IDLoc);
3813 Out.EmitInstruction(Inst, STI);
3814 return false;
3815 }
3816 case Match_MissingFeature: {
3817 assert(ErrorInfo && "Unknown missing feature!");
3818 // Special case the error message for the very common case where only
3819 // a single subtarget feature is missing (neon, e.g.).
3820 std::string Msg = "instruction requires:";
Tim Northover26bb14e2014-08-18 11:49:42 +00003821 uint64_t Mask = 1;
Tim Northover3b0846e2014-05-24 12:50:23 +00003822 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3823 if (ErrorInfo & Mask) {
3824 Msg += " ";
3825 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3826 }
3827 Mask <<= 1;
3828 }
3829 return Error(IDLoc, Msg);
3830 }
3831 case Match_MnemonicFail:
3832 return showMatchError(IDLoc, MatchResult);
3833 case Match_InvalidOperand: {
3834 SMLoc ErrorLoc = IDLoc;
Tim Northover26bb14e2014-08-18 11:49:42 +00003835 if (ErrorInfo != ~0ULL) {
Tim Northover3b0846e2014-05-24 12:50:23 +00003836 if (ErrorInfo >= Operands.size())
3837 return Error(IDLoc, "too few operands for instruction");
3838
David Blaikie960ea3f2014-06-08 16:18:35 +00003839 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
Tim Northover3b0846e2014-05-24 12:50:23 +00003840 if (ErrorLoc == SMLoc())
3841 ErrorLoc = IDLoc;
3842 }
3843 // If the match failed on a suffix token operand, tweak the diagnostic
3844 // accordingly.
David Blaikie960ea3f2014-06-08 16:18:35 +00003845 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3846 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
Tim Northover3b0846e2014-05-24 12:50:23 +00003847 MatchResult = Match_InvalidSuffix;
3848
3849 return showMatchError(ErrorLoc, MatchResult);
3850 }
3851 case Match_InvalidMemoryIndexed1:
3852 case Match_InvalidMemoryIndexed2:
3853 case Match_InvalidMemoryIndexed4:
3854 case Match_InvalidMemoryIndexed8:
3855 case Match_InvalidMemoryIndexed16:
3856 case Match_InvalidCondCode:
3857 case Match_AddSubRegExtendSmall:
3858 case Match_AddSubRegExtendLarge:
3859 case Match_AddSubSecondSource:
3860 case Match_LogicalSecondSource:
3861 case Match_AddSubRegShift32:
3862 case Match_AddSubRegShift64:
3863 case Match_InvalidMovImm32Shift:
3864 case Match_InvalidMovImm64Shift:
3865 case Match_InvalidFPImm:
3866 case Match_InvalidMemoryWExtend8:
3867 case Match_InvalidMemoryWExtend16:
3868 case Match_InvalidMemoryWExtend32:
3869 case Match_InvalidMemoryWExtend64:
3870 case Match_InvalidMemoryWExtend128:
3871 case Match_InvalidMemoryXExtend8:
3872 case Match_InvalidMemoryXExtend16:
3873 case Match_InvalidMemoryXExtend32:
3874 case Match_InvalidMemoryXExtend64:
3875 case Match_InvalidMemoryXExtend128:
3876 case Match_InvalidMemoryIndexed4SImm7:
3877 case Match_InvalidMemoryIndexed8SImm7:
3878 case Match_InvalidMemoryIndexed16SImm7:
3879 case Match_InvalidMemoryIndexedSImm9:
3880 case Match_InvalidImm0_7:
3881 case Match_InvalidImm0_15:
3882 case Match_InvalidImm0_31:
3883 case Match_InvalidImm0_63:
3884 case Match_InvalidImm0_127:
3885 case Match_InvalidImm0_65535:
3886 case Match_InvalidImm1_8:
3887 case Match_InvalidImm1_16:
3888 case Match_InvalidImm1_32:
3889 case Match_InvalidImm1_64:
3890 case Match_InvalidIndex1:
3891 case Match_InvalidIndexB:
3892 case Match_InvalidIndexH:
3893 case Match_InvalidIndexS:
3894 case Match_InvalidIndexD:
3895 case Match_InvalidLabel:
3896 case Match_MSR:
3897 case Match_MRS: {
Artyom Skrobov7e9e31e2014-05-29 11:26:15 +00003898 if (ErrorInfo >= Operands.size())
3899 return Error(IDLoc, "too few operands for instruction");
Tim Northover3b0846e2014-05-24 12:50:23 +00003900 // Any time we get here, there's nothing fancy to do. Just get the
3901 // operand SMLoc and display the diagnostic.
David Blaikie960ea3f2014-06-08 16:18:35 +00003902 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
Tim Northover3b0846e2014-05-24 12:50:23 +00003903 if (ErrorLoc == SMLoc())
3904 ErrorLoc = IDLoc;
3905 return showMatchError(ErrorLoc, MatchResult);
3906 }
3907 }
3908
3909 llvm_unreachable("Implement any new match types added!");
3910 return true;
3911}
3912
3913/// ParseDirective parses the AArch64-specific directives
3914bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3915 StringRef IDVal = DirectiveID.getIdentifier();
3916 SMLoc Loc = DirectiveID.getLoc();
3917 if (IDVal == ".hword")
3918 return parseDirectiveWord(2, Loc);
3919 if (IDVal == ".word")
3920 return parseDirectiveWord(4, Loc);
3921 if (IDVal == ".xword")
3922 return parseDirectiveWord(8, Loc);
3923 if (IDVal == ".tlsdesccall")
3924 return parseDirectiveTLSDescCall(Loc);
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00003925 if (IDVal == ".ltorg" || IDVal == ".pool")
3926 return parseDirectiveLtorg(Loc);
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00003927 if (IDVal == ".unreq")
3928 return parseDirectiveUnreq(DirectiveID.getLoc());
3929
Tim Northover3b0846e2014-05-24 12:50:23 +00003930 return parseDirectiveLOH(IDVal, Loc);
3931}
3932
3933/// parseDirectiveWord
3934/// ::= .word [ expression (, expression)* ]
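/// e.g. ".hword 0x1234" emits 2 bytes, ".word sym" 4 bytes and ".xword sym+8"
/// 8 bytes (sizes as dispatched from ParseDirective above).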
3935bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3936 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3937 for (;;) {
3938 const MCExpr *Value;
3939 if (getParser().parseExpression(Value))
3940 return true;
3941
3942 getParser().getStreamer().EmitValue(Value, Size);
3943
3944 if (getLexer().is(AsmToken::EndOfStatement))
3945 break;
3946
3947 // FIXME: Improve diagnostic.
3948 if (getLexer().isNot(AsmToken::Comma))
3949 return Error(L, "unexpected token in directive");
3950 Parser.Lex();
3951 }
3952 }
3953
3954 Parser.Lex();
3955 return false;
3956}
3957
3958// parseDirectiveTLSDescCall:
3959// ::= .tlsdesccall symbol
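// e.g. ".tlsdesccall var" emits the TLSDESCCALL marker with a VK_TLSDESC
// reference to "var", typically placed just before the blr of a TLS
// descriptor call sequence.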
3960bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3961 StringRef Name;
3962 if (getParser().parseIdentifier(Name))
3963 return Error(L, "expected symbol after directive");
3964
3965 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3966 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3967 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3968
3969 MCInst Inst;
3970 Inst.setOpcode(AArch64::TLSDESCCALL);
3971 Inst.addOperand(MCOperand::CreateExpr(Expr));
3972
3973 getParser().getStreamer().EmitInstruction(Inst, STI);
3974 return false;
3975}
3976
3977/// ::= .loh <lohName | lohId> label1, ..., labelN
3978/// The number of arguments depends on the loh identifier.
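/// e.g. ".loh AdrpAdd Lloh0, Lloh1" (two labels for the AdrpAdd kind); the
/// label names here are illustrative.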
3979bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3980 if (IDVal != MCLOHDirectiveName())
3981 return true;
3982 MCLOHType Kind;
3983 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3984 if (getParser().getTok().isNot(AsmToken::Integer))
3985 return TokError("expected an identifier or a number in directive");
3986    // We successfully parsed a numeric value for the identifier.
3987 // Check if it is valid.
3988 int64_t Id = getParser().getTok().getIntVal();
Alexey Samsonov700964e2014-08-29 22:34:28 +00003989 if (Id <= -1U && !isValidMCLOHType(Id))
Tim Northover3b0846e2014-05-24 12:50:23 +00003990 return TokError("invalid numeric identifier in directive");
Alexey Samsonov700964e2014-08-29 22:34:28 +00003991 Kind = (MCLOHType)Id;
Tim Northover3b0846e2014-05-24 12:50:23 +00003992 } else {
3993 StringRef Name = getTok().getIdentifier();
3994    // We successfully parsed an identifier.
3995 // Check if it is a recognized one.
3996 int Id = MCLOHNameToId(Name);
3997
3998 if (Id == -1)
3999 return TokError("invalid identifier in directive");
4000 Kind = (MCLOHType)Id;
4001 }
4002 // Consume the identifier.
4003 Lex();
4004 // Get the number of arguments of this LOH.
4005 int NbArgs = MCLOHIdToNbArgs(Kind);
4006
4007 assert(NbArgs != -1 && "Invalid number of arguments");
4008
4009 SmallVector<MCSymbol *, 3> Args;
4010 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4011 StringRef Name;
4012 if (getParser().parseIdentifier(Name))
4013 return TokError("expected identifier in directive");
4014 Args.push_back(getContext().GetOrCreateSymbol(Name));
4015
4016 if (Idx + 1 == NbArgs)
4017 break;
4018 if (getLexer().isNot(AsmToken::Comma))
4019 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4020 Lex();
4021 }
4022 if (getLexer().isNot(AsmToken::EndOfStatement))
4023 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4024
4025 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4026 return false;
4027}
4028
Weiming Zhaob1d4dbd2014-06-24 16:21:38 +00004029/// parseDirectiveLtorg
4030/// ::= .ltorg | .pool
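/// e.g. a pool entry created earlier by "ldr x0, =0x123456789abc" is emitted
/// at the point of the ".ltorg" (or ".pool") directive.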
4031bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4032 getTargetStreamer().emitCurrentConstantPool();
4033 return false;
4034}
4035
Saleem Abdulrasool2e09c512014-07-02 04:50:23 +00004036/// parseDirectiveReq
4037/// ::= name .req registername
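/// e.g. "acc .req x4" defines the alias "acc"; redefining it to a different
/// register only produces a warning (see below).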
4038bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4039 Parser.Lex(); // Eat the '.req' token.
4040 SMLoc SRegLoc = getLoc();
4041 unsigned RegNum = tryParseRegister();
4042 bool IsVector = false;
4043
4044 if (RegNum == static_cast<unsigned>(-1)) {
4045 StringRef Kind;
4046 RegNum = tryMatchVectorRegister(Kind, false);
4047 if (!Kind.empty()) {
4048 Error(SRegLoc, "vector register without type specifier expected");
4049 return false;
4050 }
4051 IsVector = true;
4052 }
4053
4054 if (RegNum == static_cast<unsigned>(-1)) {
4055 Parser.eatToEndOfStatement();
4056 Error(SRegLoc, "register name or alias expected");
4057 return false;
4058 }
4059
4060 // Shouldn't be anything else.
4061 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4062 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4063 Parser.eatToEndOfStatement();
4064 return false;
4065 }
4066
4067 Parser.Lex(); // Consume the EndOfStatement
4068
4069 auto pair = std::make_pair(IsVector, RegNum);
4070 if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4071 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4072
4073 return true;
4074}
4075
4076/// parseDirectiveUnreq
4077/// ::= .unreq registername
4078bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4079 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4080 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4081 Parser.eatToEndOfStatement();
4082 return false;
4083 }
4084 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4085 Parser.Lex(); // Eat the identifier.
4086 return false;
4087}
4088
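// Decomposes a symbolic immediate; e.g. ":lo12:sym + 16" yields ELFRefKind ==
// AArch64MCExpr::VK_LO12, DarwinRefKind == VK_None and Addend == 16 (roughly;
// the checks below define the exact accepted shapes).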
Tim Northover3b0846e2014-05-24 12:50:23 +00004089bool
4090AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4091 AArch64MCExpr::VariantKind &ELFRefKind,
4092 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4093 int64_t &Addend) {
4094 ELFRefKind = AArch64MCExpr::VK_INVALID;
4095 DarwinRefKind = MCSymbolRefExpr::VK_None;
4096 Addend = 0;
4097
4098 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4099 ELFRefKind = AE->getKind();
4100 Expr = AE->getSubExpr();
4101 }
4102
4103 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4104 if (SE) {
4105 // It's a simple symbol reference with no addend.
4106 DarwinRefKind = SE->getKind();
4107 return true;
4108 }
4109
4110 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4111 if (!BE)
4112 return false;
4113
4114 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4115 if (!SE)
4116 return false;
4117 DarwinRefKind = SE->getKind();
4118
4119 if (BE->getOpcode() != MCBinaryExpr::Add &&
4120 BE->getOpcode() != MCBinaryExpr::Sub)
4121 return false;
4122
4123  // See if the addend is a constant; otherwise there's more going
4124 // on here than we can deal with.
4125 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4126 if (!AddendExpr)
4127 return false;
4128
4129 Addend = AddendExpr->getValue();
4130 if (BE->getOpcode() == MCBinaryExpr::Sub)
4131 Addend = -Addend;
4132
4133 // It's some symbol reference + a constant addend, but really
4134 // shouldn't use both Darwin and ELF syntax.
4135 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4136 DarwinRefKind == MCSymbolRefExpr::VK_None;
4137}
4138
4139/// Force static initialization.
4140extern "C" void LLVMInitializeAArch64AsmParser() {
4141 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4142 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
Tim Northover35910d72014-07-23 12:58:11 +00004143 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
Tim Northover3b0846e2014-05-24 12:50:23 +00004144}
4145
4146#define GET_REGISTER_MATCHER
4147#define GET_SUBTARGET_FEATURE_NAME
4148#define GET_MATCHER_IMPLEMENTATION
4149#include "AArch64GenAsmMatcher.inc"
4150
4151// Define this matcher function after the auto-generated include so we
4152// have the match class enum definitions.
David Blaikie960ea3f2014-06-08 16:18:35 +00004153unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
Tim Northover3b0846e2014-05-24 12:50:23 +00004154 unsigned Kind) {
David Blaikie960ea3f2014-06-08 16:18:35 +00004155 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
Tim Northover3b0846e2014-05-24 12:50:23 +00004156 // If the kind is a token for a literal immediate, check if our asm
4157 // operand matches. This is for InstAliases which have a fixed-value
4158 // immediate in the syntax.
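  // (MCK__35_<n> is the generated matcher class for the literal token "#<n>";
  // 35 is the ASCII code of '#'.)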
4159 int64_t ExpectedVal;
4160 switch (Kind) {
4161 default:
4162 return Match_InvalidOperand;
4163 case MCK__35_0:
4164 ExpectedVal = 0;
4165 break;
4166 case MCK__35_1:
4167 ExpectedVal = 1;
4168 break;
4169 case MCK__35_12:
4170 ExpectedVal = 12;
4171 break;
4172 case MCK__35_16:
4173 ExpectedVal = 16;
4174 break;
4175 case MCK__35_2:
4176 ExpectedVal = 2;
4177 break;
4178 case MCK__35_24:
4179 ExpectedVal = 24;
4180 break;
4181 case MCK__35_3:
4182 ExpectedVal = 3;
4183 break;
4184 case MCK__35_32:
4185 ExpectedVal = 32;
4186 break;
4187 case MCK__35_4:
4188 ExpectedVal = 4;
4189 break;
4190 case MCK__35_48:
4191 ExpectedVal = 48;
4192 break;
4193 case MCK__35_6:
4194 ExpectedVal = 6;
4195 break;
4196 case MCK__35_64:
4197 ExpectedVal = 64;
4198 break;
4199 case MCK__35_8:
4200 ExpectedVal = 8;
4201 break;
4202 }
David Blaikie960ea3f2014-06-08 16:18:35 +00004203 if (!Op.isImm())
Tim Northover3b0846e2014-05-24 12:50:23 +00004204 return Match_InvalidOperand;
David Blaikie960ea3f2014-06-08 16:18:35 +00004205 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
Tim Northover3b0846e2014-05-24 12:50:23 +00004206 if (!CE)
4207 return Match_InvalidOperand;
4208 if (CE->getValue() == ExpectedVal)
4209 return Match_Success;
4210 return Match_InvalidOperand;
4211}