[AArch64][AsmParser] Extend RegOp with integrated 'shift/extend'.
Summary:
In some cases the shift/extend needs to be explicitly parsed together
with the register, rather than as a separate operand. This is needed
for addressing modes where the instruction as a whole dictates the
scaling/extend, rather than specific bits in the instruction.
By parsing them as a single operand, we avoid the need to pass an
extra operand in all CodeGen patterns (because all operands need to
have an associated value), and we avoid the need to update TableGen to
accept operands that have no associated bits in the instruction.
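The new parser/predicate methods are meant to be referenced from
TableGen operand classes. As a rough sketch of that hookup (the
operand-class name below is hypothetical, not a definition added by
this series):

  // Hypothetical AsmOperandClass tying the new methods together;
  // ParserMethod names a method on AArch64AsmParser, PredicateMethod
  // a method on AArch64Operand.
  def GPR64shifted16AsmOpnd : AsmOperandClass {
    let Name = "GPR64shifted16";
    let ParserMethod = "tryParseGPROperand<true>";
    let PredicateMethod =
        "isGPR64WithShiftExtend<AArch64::GPR64RegClassID, 16>";
    let RenderMethod = "addRegOperands";
  }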
An added benefit of parsing them together is that the assembler
can give a sensible diagnostic if the scaling is not correct.
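For example, with the SVE LD1 (scalar+scalar) forms this series
targets, the element size fixes the required shift amount, so a
mismatch can be rejected outright (illustrative snippet; the LD1
encodings themselves arrive in later patches of the series):

  ld1h { z0.h }, p0/z, [x0, x1, lsl #1]  // OK: #1 matches the 16-bit
                                         // element size.
  ld1h { z0.h }, p0/z, [x0, x1, lsl #2]  // Rejected with a diagnostic:
                                         // wrong scaling for .h elements.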
This is patch [2/4] in a series to add assembler/disassembler support for
SVE's contiguous LD1 (scalar+scalar) instructions:
- Patch [1/4]: https://reviews.llvm.org/D45687
- Patch [2/4]: https://reviews.llvm.org/D45688
- Patch [3/4]: https://reviews.llvm.org/D45689
- Patch [4/4]: https://reviews.llvm.org/D45690
Reviewers: fhahn, rengolin, javed.absar, huntergr, SjoerdMeijer, t.p.northover, echristo, evandro
Reviewed By: fhahn, SjoerdMeijer
Subscribers: kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D45688
llvm-svn: 330394
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 1a8d172..b5d819a 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -138,6 +138,8 @@
bool tryParseNeonVectorRegister(OperandVector &Operands);
OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
+ template <bool ParseShiftExtend>
+ OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
template <bool ParseSuffix>
OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
@@ -209,11 +211,33 @@
bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
};
+ // Separate shift/extend operand.
+ struct ShiftExtendOp {
+ AArch64_AM::ShiftExtendType Type;
+ unsigned Amount;
+ bool HasExplicitAmount;
+ };
+
struct RegOp {
unsigned RegNum;
RegKind Kind;
-
int ElementWidth;
+
+ // In some cases the shift/extend needs to be explicitly parsed together
+ // with the register, rather than as a separate operand. This is needed
+ // for addressing modes where the instruction as a whole dictates the
+ // scaling/extend, rather than specific bits in the instruction.
+ // By parsing them as a single operand, we avoid the need to pass an
+ // extra operand in all CodeGen patterns (because all operands need to
+ // have an associated value), and we avoid the need to update TableGen to
+ // accept operands that have no associated bits in the instruction.
+ //
+ // An added benefit of parsing them together is that the assembler
+ // can give a sensible diagnostic if the scaling is not correct.
+ //
+ // The default is 'lsl #0' (HasExplicitAmount = false) if no
+ // ShiftExtend is specified.
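+ // For example, a plain 'x0' is stored as { LSL, 0, false }.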
+ ShiftExtendOp ShiftExtend;
};
struct VectorListOp {
@@ -275,12 +299,6 @@
unsigned Val;
};
- struct ShiftExtendOp {
- AArch64_AM::ShiftExtendType Type;
- unsigned Amount;
- bool HasExplicitAmount;
- };
-
struct ExtendOp {
unsigned Val;
};
@@ -460,18 +478,27 @@
}
AArch64_AM::ShiftExtendType getShiftExtendType() const {
- assert(Kind == k_ShiftExtend && "Invalid access!");
- return ShiftExtend.Type;
+ if (Kind == k_ShiftExtend)
+ return ShiftExtend.Type;
+ if (Kind == k_Register)
+ return Reg.ShiftExtend.Type;
+ llvm_unreachable("Invalid access!");
}
unsigned getShiftExtendAmount() const {
- assert(Kind == k_ShiftExtend && "Invalid access!");
- return ShiftExtend.Amount;
+ if (Kind == k_ShiftExtend)
+ return ShiftExtend.Amount;
+ if (Kind == k_Register)
+ return Reg.ShiftExtend.Amount;
+ llvm_unreachable("Invalid access!");
}
bool hasShiftExtendAmount() const {
- assert(Kind == k_ShiftExtend && "Invalid access!");
- return ShiftExtend.HasExplicitAmount;
+ if (Kind == k_ShiftExtend)
+ return ShiftExtend.HasExplicitAmount;
+ if (Kind == k_Register)
+ return Reg.ShiftExtend.HasExplicitAmount;
+ llvm_unreachable("Invalid access!");
}
bool isImm() const override { return Kind == k_Immediate; }
@@ -847,11 +874,6 @@
Reg.RegNum);
}
- bool isGPR64sp0() const {
- return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
- AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
- }
-
template<int64_t Angle, int64_t Remainder>
bool isComplexRotation() const {
if (!isImm()) return false;
@@ -863,6 +885,20 @@
return (Value % Angle == Remainder && Value <= 270);
}
+ template <unsigned RegClassID> bool isGPR64() const {
+ return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
+ AArch64MCRegisterClasses[RegClassID].contains(getReg());
+ }
+
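+ // Check that the register is followed by an 'lsl #N' whose amount
+ // matches the byte-log2 of the element width, e.g. ExtWidth == 16
+ // requires 'lsl #1' since Log2_32(16 / 8) == 1.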
+ template <unsigned RegClassID, int ExtWidth>
+ bool isGPR64WithShiftExtend() const {
+ if (!isGPR64<RegClassID>())
+ return false;
+
+ return getShiftExtendType() == AArch64_AM::LSL &&
+ getShiftExtendAmount() == Log2_32(ExtWidth / 8);
+ }
+
/// Is this a vector list with the type implicit (presumably attached to the
/// instruction itself)?
template <RegKind VectorKind, unsigned NumRegs>
@@ -1583,10 +1619,17 @@
}
static std::unique_ptr<AArch64Operand>
- CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
+ CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
+ AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
+ unsigned ShiftAmount = 0,
+ bool HasExplicitAmount = false) {
auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
Op->Reg.RegNum = RegNum;
Op->Reg.Kind = Kind;
+ Op->Reg.ElementWidth = 0;
+ Op->Reg.ShiftExtend.Type = ExtTy;
+ Op->Reg.ShiftExtend.Amount = ShiftAmount;
+ Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
@@ -1594,16 +1637,16 @@
static std::unique_ptr<AArch64Operand>
CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
- SMLoc S, SMLoc E, MCContext &Ctx) {
+ SMLoc S, SMLoc E, MCContext &Ctx,
+ AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
+ unsigned ShiftAmount = 0,
+ bool HasExplicitAmount = false) {
assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
Kind == RegKind::SVEPredicateVector) &&
"Invalid vector kind");
- auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
- Op->Reg.RegNum = RegNum;
+ auto Op = CreateReg(RegNum, Kind, S, E, Ctx, ExtTy, ShiftAmount,
+ HasExplicitAmount);
Op->Reg.ElementWidth = ElementWidth;
- Op->Reg.Kind = Kind;
- Op->StartLoc = S;
- Op->EndLoc = E;
return Op;
}
@@ -1776,9 +1819,6 @@
case k_CondCode:
OS << "<condcode " << getCondCode() << ">";
break;
- case k_Register:
- OS << "<register " << getReg() << ">";
- break;
case k_VectorList: {
OS << "<vectorlist ";
unsigned Reg = getVectorListStart();
@@ -1810,6 +1850,11 @@
case k_PSBHint:
OS << getPSBHintName();
break;
+ case k_Register:
+ OS << "<register " << getReg() << ">";
+ if (!getShiftExtendAmount() && !hasShiftExtendAmount())
+ break;
+ LLVM_FALLTHROUGH;
case k_ShiftExtend:
OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
<< getShiftExtendAmount();
@@ -2802,19 +2847,15 @@
/// parseRegister - Parse a register operand.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
- SMLoc S = getLoc();
// Try for a Neon vector register.
if (!tryParseNeonVectorRegister(Operands))
return false;
- // Try for a scalar register.
- unsigned Reg;
- if (tryParseScalarRegister(Reg) != MatchOperand_Success)
- return true;
- Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
- getLoc(), getContext()));
+ // Otherwise try for a scalar register.
+ if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
+ return false;
- return false;
+ return true;
}
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
@@ -3051,6 +3092,41 @@
return MatchOperand_Success;
}
+template <bool ParseShiftExtend>
+OperandMatchResultTy
+AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
+ SMLoc StartLoc = getLoc();
+
+ unsigned RegNum;
+ OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
+ if (Res != MatchOperand_Success)
+ return Res;
+
+ // No shift/extend is the default.
+ if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
+ Operands.push_back(AArch64Operand::CreateReg(
+ RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
+ return MatchOperand_Success;
+ }
+
+ // Eat the comma.
+ getParser().Lex();
+
+ // Match the shift/extend operand that follows.
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
+ Res = tryParseOptionalShiftExtend(ExtOpnd);
+ if (Res != MatchOperand_Success)
+ return Res;
+
+ auto *Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
+ Operands.push_back(AArch64Operand::CreateReg(RegNum, RegKind::Scalar,
+ StartLoc, Ext->getEndLoc(), getContext(),
+ Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
+ Ext->hasShiftExtendAmount()));
+
+ return MatchOperand_Success;
+}
+
bool AArch64AsmParser::parseOptionalMulVl(OperandVector &Operands) {
MCAsmParser &Parser = getParser();