X86 Peephole: fold loads to the source register operand if possible.

Add more comments and use early returns to reduce nesting in isLoadFoldable.
Also disable folding for V_SET0 to avoid introducing a constant pool entry
and a constant pool load.

rdar://10554090 and rdar://11873276


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@161207 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp
index d9474bf..6bc7e37 100644
--- a/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/lib/CodeGen/PeepholeOptimizer.cpp
@@ -391,20 +391,21 @@
 /// register defined has a single use.
 bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                        unsigned &FoldAsLoadDefReg) {
-  if (MI->canFoldAsLoad()) {
-    const MCInstrDesc &MCID = MI->getDesc();
-    if (MCID.getNumDefs() == 1) {
-      unsigned Reg = MI->getOperand(0).getReg();
-      // To reduce compilation time, we check MRI->hasOneUse when inserting
-      // loads. It should be checked when processing uses of the load, since
-      // uses can be removed during peephole.
-      if (!MI->getOperand(0).getSubReg() &&
-          TargetRegisterInfo::isVirtualRegister(Reg) &&
-          MRI->hasOneUse(Reg)) {
-        FoldAsLoadDefReg = Reg;
-        return true;
-      }
-    }
+  if (!MI->canFoldAsLoad() || !MI->mayLoad())
+    return false;
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MCID.getNumDefs() != 1)
+    return false;
+
+  unsigned Reg = MI->getOperand(0).getReg();
+  // To reduce compilation time, we check MRI->hasOneUse when inserting
+  // loads. It should be checked when processing uses of the load, since
+  // uses can be removed during peephole.
+  if (!MI->getOperand(0).getSubReg() &&
+      TargetRegisterInfo::isVirtualRegister(Reg) &&
+      MRI->hasOneUse(Reg)) {
+    FoldAsLoadDefReg = Reg;
+    return true;
   }
   return false;
 }