Fix PR6497, a bug where we'd fold a load into an addc
node which has a flag result.  That flag was in turn
used by an already-selected adde, which had been turned
into an ADC32ri8 that used a selected load chained to
the load we folded.  This flag use caused us to form a
cycle.  Fix this by not ignoring chains in IsLegalToFold,
even in cases where the isel thinks it can ignore them.
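
Roughly, the shape of the problem (a sketch reconstructed from the
description above; node names are illustrative, not the exact DAG
from the PR):

  addc (with load1 folded in) --flag--> ADC32ri8 (selected adde)
                                           |
                                          uses
                                           v
  load1 <----chain----> load2 (selected load)

With load1 absorbed into the addc, the chain edge between load1 and
load2 combines with the flag edge into the adde to form the cycle,
which IsLegalToFold missed while it was ignoring chains.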


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@97791 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b9ec4c4..f426fb1 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -1485,7 +1485,14 @@
       break;
     Root = FU;
     VT = Root->getValueType(Root->getNumValues()-1);
+
+    // If our query node has a flag result that is used, we've walked up to
+    // that use.  If the user (already selected) has a chain or indirectly
+    // uses the chain, our WalkChainUsers predicate will not consider it.
+    // Because of this, we cannot ignore chains in this predicate.
+    IgnoreChains = false;
  }
+
 
   SmallPtrSet<SDNode*, 16> Visited;
   return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
new file mode 100644
index 0000000..1e13046
--- /dev/null
+++ b/test/CodeGen/X86/crash.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=x86 %s -o -
+; RUN: llc -march=x86-64 %s -o -
+
+; PR6497
+
+; Chain and flag folding issues.
+define i32 @test1() nounwind ssp {
+entry:
+  %tmp5.i = volatile load i32* undef              ; <i32> [#uses=1]
+  %conv.i = zext i32 %tmp5.i to i64               ; <i64> [#uses=1]
+  %tmp12.i = volatile load i32* undef             ; <i32> [#uses=1]
+  %conv13.i = zext i32 %tmp12.i to i64            ; <i64> [#uses=1]
+  %shl.i = shl i64 %conv13.i, 32                  ; <i64> [#uses=1]
+  %or.i = or i64 %shl.i, %conv.i                  ; <i64> [#uses=1]
+  %add16.i = add i64 %or.i, 256                   ; <i64> [#uses=1]
+  %shr.i = lshr i64 %add16.i, 8                   ; <i64> [#uses=1]
+  %conv19.i = trunc i64 %shr.i to i32             ; <i32> [#uses=1]
+  volatile store i32 %conv19.i, i32* undef
+  ret i32 undef
+}