If we have a load from a global address that isn't redefined during the
function, go ahead and hoist it out of the loop. This is the result:
$ cat a.c
volatile int G;

int A(int N) {
  for (; N > 0; --N)
    G++;
}
$ llc -o - -relocation-model=pic
_A:
...
LBB1_2: # bb
        movl L_G$non_lazy_ptr-"L1$pb"(%eax), %esi
        incl (%esi)
        incl %edx
        cmpl %ecx, %edx
        jne LBB1_2 # bb
...
$ llc -o - -relocation-model=pic -machine-licm
_A:
...
        movl L_G$non_lazy_ptr-"L1$pb"(%eax), %eax
LBB1_2: # bb
        incl (%eax)
        incl %edx
        cmpl %ecx, %edx
        jne LBB1_2 # bb
...
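With -machine-licm the load of L_G$non_lazy_ptr now happens once, before the
loop, instead of on every iteration. As a rough caller-side sketch of the
decision a LICM-style pass makes here (stand-in types, illustration only, not
the actual pass code):

// Illustration only: Instr stands in for MachineInstr; the flags model the
// questions a hoisting pass asks before pulling an instruction out of a loop.
struct Instr {
  bool mayHaveSideEffects;        // static M_MAY_HAVE_SIDE_EFFECTS flag
  bool reallySideEffectFree;      // what the target hook below would return
  bool readsMemoryChangedInLoop;  // e.g. the increment of the volatile global G
};

// Hoist only instructions that are proven side effect free and whose inputs
// (including memory) are invariant across the loop.
bool canHoist(const Instr &I) {
  bool SideEffectFree = !I.mayHaveSideEffects || I.reallySideEffectFree;
  return SideEffectFree && !I.readsMemoryChangedInLoop;
}

int main() {
  Instr PtrLoad = {true, true, false};  // load of L_G$non_lazy_ptr: hoistable
  Instr VolInc  = {true, false, true};  // incl of volatile G: stays in the loop
  return (canHoist(PtrLoad) && !canHoist(VolInc)) ? 0 : 1;
}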
I'm limiting this to the MOV32rm x86 instruction for now.
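To make the matched pattern concrete, here is a small standalone sketch (plain
structs standing in for MachineOperand; illustration only, not the LLVM
classes) of the MOV32rm memory-operand shape the new code looks for: a base
register defined in the entry block, scale 1, no index register, and a
global-address displacement:

// Illustration only: Operand stands in for MachineOperand.  The parameters
// mirror the MOV32rm memory operands checked in the patch below
// (operand 1 = base reg, 2 = scale, 3 = index reg, 4 = displacement).
#include <cassert>

struct Operand {
  enum Kind { Reg, Imm, Global } kind;
  unsigned reg;  // meaningful when kind == Reg
  int imm;       // meaningful when kind == Imm
};

bool looksLikeHoistableGlobalLoad(const Operand &Base, const Operand &Scale,
                                  const Operand &Index, const Operand &Disp,
                                  bool BaseDefinedInEntryBlock) {
  return Base.kind == Operand::Reg && BaseDefinedInEntryBlock &&
         Scale.kind == Operand::Imm && Scale.imm == 1 &&
         Index.kind == Operand::Reg && Index.reg == 0 &&  // %noreg
         Disp.kind == Operand::Global;
}

int main() {
  // movl L_G$non_lazy_ptr-"L1$pb"(%eax), %esi: the base is the PIC base
  // pointer (set up once in the entry block), scale 1, no index, and the
  // displacement is the global L_G$non_lazy_ptr.
  Operand Base  = {Operand::Reg, 1024, 0};  // some virtual register
  Operand Scale = {Operand::Imm, 0, 1};
  Operand Index = {Operand::Reg, 0, 0};
  Operand Disp  = {Operand::Global, 0, 0};
  assert(looksLikeHoistableGlobalLoad(Base, Scale, Index, Disp, true));
  return 0;
}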
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45444 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 08a26e1..4f7d51e 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -144,6 +144,37 @@
   return true;
 }
 
+/// isDefinedInEntryBlock - Goes through the entry block to see if the given
+/// virtual register is indeed defined in the entry block.
+///
+bool X86InstrInfo::isDefinedInEntryBlock(const MachineBasicBlock &Entry,
+                                         unsigned VReg) const {
+  assert(MRegisterInfo::isVirtualRegister(VReg) &&
+         "Map only holds virtual registers!");
+  MachineInstrMap.grow(VReg);
+  if (MachineInstrMap[VReg]) return true;
+
+  MachineBasicBlock::const_iterator I = Entry.begin(), E = Entry.end();
+
+  for (; I != E; ++I) {
+    const MachineInstr &MI = *I;
+    unsigned NumOps = MI.getNumOperands();
+
+    for (unsigned i = 0; i < NumOps; ++i) {
+      const MachineOperand &MO = MI.getOperand(i);
+
+      if (MO.isRegister() && MO.isDef() &&
+          MRegisterInfo::isVirtualRegister(MO.getReg()) &&
+          MO.getReg() == VReg) {
+        MachineInstrMap[VReg] = &MI;
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
 /// isReallySideEffectFree - If the M_MAY_HAVE_SIDE_EFFECTS flag is set, this
 /// method is called to determine if the specific instance of this instruction
 /// has side effects. This is useful in cases of instructions, like loads, which
@@ -152,10 +183,25 @@
 bool X86InstrInfo::isReallySideEffectFree(MachineInstr *MI) const {
   switch (MI->getOpcode()) {
   default: break;
+  case X86::MOV32rm:
+    if (MI->getOperand(1).isRegister()) {
+      unsigned Reg = MI->getOperand(1).getReg();
+
+      // Loads from global addresses which aren't redefined in the function are
+      // side effect free.
+      if (MRegisterInfo::isVirtualRegister(Reg) &&
+          isDefinedInEntryBlock(MI->getParent()->getParent()->front(), Reg) &&
+          MI->getOperand(2).isImmediate() &&
+          MI->getOperand(3).isRegister() &&
+          MI->getOperand(4).isGlobalAddress() &&
+          MI->getOperand(2).getImmedValue() == 1 &&
+          MI->getOperand(3).getReg() == 0)
+        return true;
+    }
+    // FALLTHROUGH
   case X86::MOV8rm:
   case X86::MOV16rm:
   case X86::MOV16_rm:
-  case X86::MOV32rm:
   case X86::MOV32_rm:
   case X86::MOV64rm:
   case X86::LD_Fp64m:
@@ -166,8 +212,10 @@
   case X86::MMX_MOVD64rm:
   case X86::MMX_MOVQ64rm:
     // Loads from constant pools have no side effects
-    return MI->getOperand(1).isRegister() && MI->getOperand(2).isImmediate() &&
-      MI->getOperand(3).isRegister() && MI->getOperand(4).isConstantPoolIndex() &&
+    return MI->getOperand(1).isRegister() &&
+           MI->getOperand(2).isImmediate() &&
+           MI->getOperand(3).isRegister() &&
+           MI->getOperand(4).isConstantPoolIndex() &&
       MI->getOperand(1).getReg() == 0 &&
       MI->getOperand(2).getImmedValue() == 1 &&
       MI->getOperand(3).getReg() == 0;