[AArch64] Enable more load clustering in the MI Scheduler.
This patch adds unscaled loads and sign-extend loads to the TII
getMemOpBaseRegImmOfs API, which is used to control clustering in the MI
scheduler. This is done to create more opportunities for load pairing. I've
also added the scaled LDRSWui instruction, which was missing from the scaled
instructions. Finally, I've added support in shouldClusterLoads for clustering
adjacent sext and zext loads, which the load/store optimizer can also pair.
The isCandidateToMergeOrPair check is hoisted out of the load/store optimizer
and into TII as well, with its call sites updated accordingly.
Differential Revision: http://reviews.llvm.org/D18048
llvm-svn: 263819
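
To make the clustering rule concrete, here is a minimal, self-contained sketch
of the opcode check described above: two loads are clustering candidates when
their opcodes match, and additionally when a zero-extending 32-bit load sits
next to its sign-extending counterpart (scaled or unscaled), since the
load/store optimizer can still form a pair from that combination. The enum,
the canPairLoadOpcodes helper, and the main() driver are assumptions made for
this sketch, not LLVM's own code; only the opcode names mirror real AArch64
instructions.

// A self-contained sketch of the clustering rule, not the in-tree
// implementation.
#include <iostream>

enum Opcode { LDRWui, LDURWi, LDRSWui, LDURSWi, LDRXui };

// Identical opcodes are trivially candidates. Beyond that, a zero-extending
// 32-bit load may be clustered with a sign-extending one (scaled or
// unscaled), because the load/store optimizer can still turn the pair into a
// load-pair instruction.
static bool canPairLoadOpcodes(Opcode First, Opcode Second) {
  if (First == Second)
    return true;
  switch (First) {
  case LDRWui:
  case LDURWi:
    return Second == LDRSWui || Second == LDURSWi; // zext next to sext
  case LDRSWui:
  case LDURSWi:
    return Second == LDRWui || Second == LDURWi;   // sext next to zext
  default:
    return false;
  }
}

int main() {
  std::cout << std::boolalpha
            << canPairLoadOpcodes(LDRWui, LDRSWui) << '\n'   // true
            << canPairLoadOpcodes(LDRSWui, LDRXui) << '\n';  // false
}

In the actual hook the analogous check would run on MachineInstr opcodes after
the base-register and offset checks; that wiring is outside this sketch.
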
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index de401b1..196c2bc 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -146,10 +146,6 @@
mergeUpdateInsn(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Update, bool IsPreIdx);
- // Is this a candidate for ld/st merging or pairing? For example, we don't
- // touch volatiles or load/stores that have a hint to avoid pair formation.
- bool isCandidateToMergeOrPair(MachineInstr *MI);
-
// Find and merge foldable ldr/str instructions.
bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);
@@ -1588,29 +1584,6 @@
return false;
}
-bool AArch64LoadStoreOpt::isCandidateToMergeOrPair(MachineInstr *MI) {
- // If this is a volatile load/store, don't mess with it.
- if (MI->hasOrderedMemoryRef())
- return false;
-
- // Make sure this is a reg+imm (as opposed to an address reloc).
- if (!getLdStOffsetOp(MI).isImm())
- return false;
-
- // Can't merge/pair if the instruction modifies the base register.
- // e.g., ldr x0, [x0]
- unsigned BaseReg = getLdStBaseOp(MI).getReg();
- if (MI->modifiesRegister(BaseReg, TRI))
- return false;
-
- // Check if this load/store has a hint to avoid pair formation.
- // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
- if (TII->isLdStPairSuppressed(MI))
- return false;
-
- return true;
-}
-
// Find narrow loads that can be converted into a single wider load with
// bitfield extract instructions. Also merge adjacent zero stores into a wider
// store.
@@ -1621,7 +1594,7 @@
MachineInstr *MI = MBBI;
MachineBasicBlock::iterator E = MI->getParent()->end();
- if (!isCandidateToMergeOrPair(MI))
+ if (!TII->isCandidateToMergeOrPair(MI))
return false;
// For promotable zero stores, the stored value should be WZR.
@@ -1653,7 +1626,7 @@
MachineInstr *MI = MBBI;
MachineBasicBlock::iterator E = MI->getParent()->end();
- if (!isCandidateToMergeOrPair(MI))
+ if (!TII->isCandidateToMergeOrPair(MI))
return false;
// Early exit if the offset is not possible to match. (6 bits of positive