Fix PR3537: when resetting BBI back to the start of a block (after expanding
an inline asm call), we need to forget about already-inserted address
expressions and clear SunkAddrs, since the cached computations may no longer
precede the new scan point.
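
For illustration, a minimal standalone sketch of the invariant (this is not
the LLVM code; only the names SunkAddrs and BBI come from the patch, the
integer positions and the block simulation are made up):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      // SunkAddrs caches: address expression -> position in the block where
      // its computation was inserted.  The invariant is that every cached
      // position precedes the current scan position BBI.
      std::map<std::string, int> SunkAddrs;
      int BBI = 0;

      // Scan forward; pretend an address computation is sunk at position 3.
      for (BBI = 0; BBI != 5; ++BBI)
        if (BBI == 3)
          SunkAddrs["addr"] = BBI;

      // Something (here, expanding an inline asm call) forces the scan to
      // restart at the beginning of the block.
      BBI = 0;

      // The fix: without this clear(), the entry at position 3 now lies
      // *after* BBI, so reusing it would reference a value before it is
      // defined.
      SunkAddrs.clear();

      assert(SunkAddrs.count("addr") == 0 && "stale entry is a use-before-def");
      return 0;
    }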


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64362 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0a1c641..12c76e8 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -1241,11 +1241,13 @@
   // computation.
   Value *&SunkAddr = SunkAddrs[Addr];
   if (SunkAddr) {
-    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
+    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
+               << *MemoryInst);
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
   } else {
-    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
+    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+               << *MemoryInst);
     const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
 
     Value *Result = 0;
@@ -1505,9 +1507,12 @@
       if (TLI && isa<InlineAsm>(CI->getCalledValue()))
         if (const TargetAsmInfo *TAI =
             TLI->getTargetMachine().getTargetAsmInfo()) {
-          if (TAI->ExpandInlineAsm(CI))
+          if (TAI->ExpandInlineAsm(CI)) {
             BBI = BB.begin();
-          else
+            // Avoid processing instructions out of order, which could cause
+            // reuse before a value is defined.
+            SunkAddrs.clear();
+          } else
             // Sink address computing for memory operands into the block.
             MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
         }