[AArch64] Keep track of MIFlags in the LoadStoreOptimizer
Merging:
* $x26, $x25 = frame-setup LDPXi $sp, 0
* $sp = frame-destroy ADDXri $sp, 64, 0
into an LDPXpost should preserve the flags from both instructions as
follows:
* frame-setup frame-destroy LDPXpost
Differential Revision: https://reviews.llvm.org/D44446
llvm-svn: 327533
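
As an illustration only (not part of this patch), below is a minimal
standalone C++ sketch of the merge semantics: two hypothetical flag
constants stand in for the frame-setup/frame-destroy MIFlags, and
mergeFlags() mirrors the bitwise union that the new
MachineInstr::mergeFlagsWith() performs in the diff.

  // Standalone sketch, not LLVM code: flag bits are assumed values
  // chosen for illustration.
  #include <cassert>
  #include <cstdint>

  namespace {
  enum MIFlag : uint8_t {
    NoFlags      = 0x0,
    FrameSetup   = 0x1, // carried by the LDPXi in the example above
    FrameDestroy = 0x2, // carried by the ADDXri in the example above
  };

  // Union of the flags of the two instructions being merged.
  uint8_t mergeFlags(uint8_t A, uint8_t B) { return A | B; }
  } // namespace

  int main() {
    uint8_t LoadFlags = FrameSetup;   // frame-setup LDPXi
    uint8_t AddFlags  = FrameDestroy; // frame-destroy ADDXri
    // The merged LDPXpost ends up carrying both flags.
    assert(mergeFlags(LoadFlags, AddFlags) == (FrameSetup | FrameDestroy));
    return 0;
  }
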
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 5f3989b..3fbf50d 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -381,6 +381,12 @@
return std::make_pair(MemBegin, CombinedNumMemRefs);
}
+uint8_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
+ // For now, just return the union of the flags. If the flags get more
+ // complicated over time, we might need more logic here.
+ return getFlags() | Other.getFlags();
+}
+
bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
assert(!isBundledWithPred() && "Must be called on bundle header");
for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 8a29456..84f161a 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -702,7 +702,8 @@
.addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*MergeMI));
+ .setMemRefs(I->mergeMemRefsWith(*MergeMI))
+ .setMIFlags(I->mergeFlagsWith(*MergeMI));
(void)MIB;
DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n ");
@@ -818,7 +819,8 @@
.add(RegOp1)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*Paired));
+ .setMemRefs(I->mergeMemRefsWith(*Paired))
+ .setMIFlags(I->mergeFlagsWith(*Paired));
(void)MIB;
@@ -913,7 +915,8 @@
TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
.addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
.add(StMO)
- .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
+ .setMIFlags(LoadI->getFlags());
} else {
// FIXME: Currently we disable this transformation in big-endian targets as
// performance and correctness are verified only in little-endian.
@@ -954,7 +957,8 @@
TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
DestReg)
.add(StMO)
- .addImm(AndMaskEncoded);
+ .addImm(AndMaskEncoded)
+ .setMIFlags(LoadI->getFlags());
} else {
BitExtMI =
BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
@@ -962,7 +966,8 @@
DestReg)
.add(StMO)
.addImm(Immr)
- .addImm(Imms);
+ .addImm(Imms)
+ .setMIFlags(LoadI->getFlags());
}
}
@@ -1352,7 +1357,8 @@
.add(getLdStRegOp(*I))
.add(getLdStBaseOp(*I))
.addImm(Value)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMIFlags(I->mergeFlagsWith(*Update));
} else {
// Paired instruction.
int Scale = getMemScale(*I);
@@ -1362,7 +1368,8 @@
.add(getLdStRegOp(*I, 1))
.add(getLdStBaseOp(*I))
.addImm(Value / Scale)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMIFlags(I->mergeFlagsWith(*Update));
}
(void)MIB;