[AArch64] Remove a command-line option used for testing.
Support for pairing unscaled loads and stores has been enabled since the
original ARM64 port, and as far as I can tell the feature is no longer
experimental, so drop the aarch64-unscaled-mem-op flag that guarded it.
llvm-svn: 249049
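
For context (illustrative, not part of this commit): the pass merges two
adjacent unscaled accesses (LDUR/STUR, which take raw byte offsets) into a
single paired access (LDP/STP), whose immediate is encoded in units of the
access size. A minimal sketch, with a hypothetical helper:

    // Illustrative only, assumed encodings: two adjacent unscaled 8-byte loads
    //     ldur x0, [x2, #-8]
    //     ldur x1, [x2, #-16]
    // can merge into one paired load
    //     ldp  x1, x0, [x2, #-16]
    // whose imm7 field holds the byte offset divided by the access size,
    // here -16/8 = -2.
    constexpr int pairImm(int ByteOffset, int MemScale) {
      return ByteOffset / MemScale;
    }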
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 178dd24..c8dfa32 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -45,11 +45,6 @@
static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
cl::init(20), cl::Hidden);
-// Place holder while testing unscaled load/store combining
-static cl::opt<bool> EnableAArch64UnscaledMemOp(
- "aarch64-unscaled-mem-op", cl::Hidden,
- cl::desc("Allow AArch64 unscaled load/store combining"), cl::init(true));
-
namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}
@@ -462,8 +457,7 @@
unsigned Opc =
SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
bool IsUnscaled = isUnscaledLdSt(Opc);
- int OffsetStride =
- IsUnscaled && EnableAArch64UnscaledMemOp ? getMemScale(I) : 1;
+ int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
bool MergeForward = Flags.getMergeForward();
unsigned NewOpc = getMatchingPairOpcode(Opc);
@@ -492,7 +486,7 @@
}
// Handle Unscaled
int OffsetImm = getLdStOffsetOp(RtMI).getImm();
- if (IsUnscaled && EnableAArch64UnscaledMemOp)
+ if (IsUnscaled)
OffsetImm /= OffsetStride;
// Construct the new instruction.
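
With the flag gone, the stride selection and the offset conversion in this
hunk reduce to a single test on the opcode. A minimal sketch of the
resulting arithmetic (hypothetical helper; MemScale stands for what
getMemScale returns, the access size in bytes):

    // Sketch only: scaled opcodes already carry their immediate in
    // access-size units, so only the unscaled forms are divided down.
    static int mergedOffsetImm(bool IsUnscaled, int OffsetImm, int MemScale) {
      int OffsetStride = IsUnscaled ? MemScale : 1;
      if (IsUnscaled)
        OffsetImm /= OffsetStride;
      return OffsetImm;
    }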
@@ -650,8 +644,7 @@
// Early exit if the offset is not possible to match. (6 bits of positive
// range, plus allow an extra one in case we find a later insn that matches
// with Offset-1)
- int OffsetStride =
- IsUnscaled && EnableAArch64UnscaledMemOp ? getMemScale(FirstMI) : 1;
+ int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
return E;
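
The bound being checked is the paired instruction's 7-bit signed, scaled
immediate; 0..63 is the "6 bits of positive range" the comment refers to.
A hedged sketch of the check, assuming that encoding:

    // Sketch of the bounds reasoning (assumed imm7 encoding, not the pass's
    // code): convert an unscaled byte offset into scaled units, then test
    // the signed 7-bit range a paired instruction can encode.
    static bool fitsPairImm(bool IsUnscaled, int Offset, int OffsetStride) {
      if (IsUnscaled)
        Offset /= OffsetStride; // byte offset -> access-size units
      return Offset >= -64 && Offset <= 63;
    }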
@@ -719,8 +712,7 @@
// If the alignment requirements of the paired (scaled) instruction
// can't express the offset of the unscaled input, bail and keep
// looking.
- if (IsUnscaled && EnableAArch64UnscaledMemOp &&
- (alignTo(MinOffset, OffsetStride) != MinOffset)) {
+ if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
MemInsns.push_back(MI);
continue;
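
The alignment test above is in effect a divisibility check: a scaled pair
immediate can only express unscaled byte offsets that are exact multiples
of the access size. A sketch under that assumption (hypothetical helper):

    // Sketch, assuming alignTo rounds MinOffset up to the next multiple of
    // OffsetStride: non-multiples cannot be encoded by the scaled pair
    // instruction, so the candidate is skipped.
    static bool pairRepresentable(int MinOffset, int OffsetStride) {
      return MinOffset % OffsetStride == 0;
    }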