X86: Avoid implicit iterator conversions, NFC

Avoid implicit conversions from MachineInstrBundleIterator to
MachineInstr*, mainly by preferring MachineInstr& over MachineInstr* and
using range-based for loops.
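
The pattern, in a minimal sketch (visit() is a hypothetical helper, not
part of this patch):

  // Before: relies on the implicit iterator-to-pointer conversion.
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
    MachineInstr *MI = I;
    visit(MI);
  }

  // After: the range-based loop yields a MachineInstr& directly.
  for (MachineInstr &MI : MBB) {
    visit(MI);
  }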

llvm-svn: 275149
diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 944c76a..9320e1e 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -127,9 +127,9 @@
   return true;
 }
 
-static bool hasYmmReg(MachineInstr *MI) {
-  for (const MachineOperand &MO : MI->operands()) {
-    if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
+static bool hasYmmReg(MachineInstr &MI) {
+  for (const MachineOperand &MO : MI.operands()) {
+    if (MI.isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
       return true;
     if (!MO.isReg())
       continue;
@@ -142,9 +142,9 @@
 }
 
 /// Check if any YMM register will be clobbered by this instruction.
-static bool callClobbersAnyYmmReg(MachineInstr *MI) {
-  assert(MI->isCall() && "Can only be called on call instructions.");
-  for (const MachineOperand &MO : MI->operands()) {
+static bool callClobbersAnyYmmReg(MachineInstr &MI) {
+  assert(MI.isCall() && "Can only be called on call instructions.");
+  for (const MachineOperand &MO : MI.operands()) {
     if (!MO.isRegMask())
       continue;
     for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
@@ -181,16 +181,14 @@
   BlockExitState CurState = PASS_THROUGH;
   BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();
 
-  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
-    MachineInstr *MI = I;
+  for (MachineInstr &MI : MBB) {
     // No need for vzeroupper before iret in an interrupt handler function;
     // the epilogue will restore YMM registers if needed.
-    bool IsReturnFromX86INTR = IsX86INTR && MI->isReturn();
-    bool IsControlFlow = MI->isCall() || MI->isReturn();
+    bool IsReturnFromX86INTR = IsX86INTR && MI.isReturn();
+    bool IsControlFlow = MI.isCall() || MI.isReturn();
 
     // An existing VZERO* instruction resets the state.
-    if (MI->getOpcode() == X86::VZEROALL ||
-        MI->getOpcode() == X86::VZEROUPPER) {
+    if (MI.getOpcode() == X86::VZEROALL || MI.getOpcode() == X86::VZEROUPPER) {
       CurState = EXITS_CLEAN;
       continue;
     }
@@ -216,7 +214,7 @@
     // standard calling convention is not used (RegMask is not used to mark
     // clobbered registers, and register usage (def/imp-def/use) is
     // well-defined and explicitly specified).
-    if (MI->isCall() && !callClobbersAnyYmmReg(MI))
+    if (MI.isCall() && !callClobbersAnyYmmReg(MI))
       continue;
 
     // The VZEROUPPER instruction resets the upper 128 bits of all AVX
@@ -230,7 +228,7 @@
       // After the inserted VZEROUPPER the state becomes clean again, but
       // other YMM uses may appear before subsequent calls or even before
       // the end of the BB.
-      insertVZeroUpper(I, MBB);
+      insertVZeroUpper(MI, MBB);
       CurState = EXITS_CLEAN;
     } else if (CurState == PASS_THROUGH) {
       // If this block is currently in pass-through state and we encounter a
@@ -238,7 +236,7 @@
       // block has successors that exit dirty. Record the location of the call,
       // and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
       // It will be inserted later if necessary.
-      BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
+      BlockStates[MBB.getNumber()].FirstUnguardedCall = MI;
       CurState = EXITS_CLEAN;
     }
   }
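
Note: insertVZeroUpper(MI, MBB) and the FirstUnguardedCall assignment still
compile because MachineInstrBundleIterator is implicitly constructible from a
MachineInstr&, so the conversion now happens once, at the iterator boundary,
rather than implicitly throughout the loop body. A simplified sketch of the
relevant constructors (see llvm/CodeGen/MachineInstrBundleIterator.h for the
actual declarations; details here are from memory and may differ):

  template <typename Ty> class MachineInstrBundleIterator {
    // (iterator internals elided)
  public:
    MachineInstrBundleIterator(Ty &NodeRef); // construct from a reference
    MachineInstrBundleIterator(Ty *NodePtr); // construct from a pointer
    // ...
  };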