Add some tests and update an existing test to reflect recent
x86 isel peephole optimizations.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92509 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/test-nofold.ll b/test/CodeGen/X86/test-nofold.ll
index 772ff6c..f1063dc 100644
--- a/test/CodeGen/X86/test-nofold.ll
+++ b/test/CodeGen/X86/test-nofold.ll
@@ -1,22 +1,35 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep {testl.*%e.x.*%e.x}
+; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
 ; rdar://5752025
 
-; We don't want to fold the and into the test, because the and clobbers its
-; input forcing a copy.  We want:
-;	movl	$15, %ecx
-;	andl	4(%esp), %ecx
-;	testl	%ecx, %ecx
+; We want:
+;      CHECK: movl	4(%esp), %ecx
+; CHECK-NEXT: andl	$15, %ecx
+; CHECK-NEXT: movl	$42, %eax
+; CHECK-NEXT: cmovel	%ecx, %eax
+; CHECK-NEXT: ret
+;
+; We don't want:
+;	movl	4(%esp), %eax
+;	movl	%eax, %ecx     # bad: extra copy
+;	andl	$15, %ecx
+;	testl	$15, %eax      # bad: peep obstructed
 ;	movl	$42, %eax
-;	cmove	%ecx, %eax
+;	cmovel	%ecx, %eax
 ;	ret
 ;
-; Not:
-;	movl	4(%esp), %eax
-;	movl	%eax, %ecx
-;	andl	$15, %ecx
-;	testl	$15, %eax
+; We also don't want:
+;	movl	$15, %ecx      # bad: larger encoding
+;	andl	4(%esp), %ecx
 ;	movl	$42, %eax
-;	cmove	%ecx, %eax
+;	cmovel	%ecx, %eax
+;	ret
+;
+; We also don't want:
+;	movl	4(%esp), %ecx
+;	andl	$15, %ecx
+;	testl	%ecx, %ecx     # bad: unnecessary test
+;	movl	$42, %eax
+;	cmovel	%ecx, %eax
 ;	ret
 
 define i32 @t1(i32 %X) nounwind  {
diff --git a/test/CodeGen/X86/twoaddr-lea.ll b/test/CodeGen/X86/twoaddr-lea.ll
new file mode 100644
index 0000000..a245ed7
--- /dev/null
+++ b/test/CodeGen/X86/twoaddr-lea.ll
@@ -0,0 +1,34 @@
+;; X's live range extends beyond the add, so the register allocator
+;; cannot coalesce it with Z.  Because of this, a copy needs to be
+;; emitted before the add to save the register value before it is
+;; clobbered.  However, this copy is not needed if the register
+;; allocator turns the add into an LEA.  The same applies to shifts.
+
+; Check that the add gets turned into an LEA.
+; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
+; RUN:   not grep {mov E.X, E.X}
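+;
+; Illustrative sketch only (registers and operand printing are not
+; checked literally): with the add turned into an LEA, @test2 can keep
+; %X in EAX and form the sum in another register, roughly
+;	mov	EAX, DWORD PTR [ESP + 4]
+;	lea	ECX, [EAX + 1]
+;	mov	DWORD PTR [G], ECX
+;	ret
+; instead of first copying EAX into ECX (the "mov E.X, E.X" pattern the
+; grep above rejects) and then adding 1 to the copy.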
+
+@G = external global i32                ; <i32*> [#uses=3]
+
+define i32 @test1(i32 %X, i32 %Y) {
+        %Z = add i32 %X, %Y             ; <i32> [#uses=1]
+        volatile store i32 %Y, i32* @G
+        volatile store i32 %Z, i32* @G
+        ret i32 %X
+}
+
+define i32 @test2(i32 %X) {
+        %Z = add i32 %X, 1              ; <i32> [#uses=1]
+        volatile store i32 %Z, i32* @G
+        ret i32 %X
+}
diff --git a/test/CodeGen/X86/use-add-flags.ll b/test/CodeGen/X86/use-add-flags.ll
new file mode 100644
index 0000000..8252030
--- /dev/null
+++ b/test/CodeGen/X86/use-add-flags.ll
@@ -0,0 +1,68 @@
+; RUN: llc < %s -march=x86-64 -o - | FileCheck %s
+
+; Reuse the flags value from the add instructions instead of emitting separate
+; testl instructions.
+
+; Use the flags on the add.
+
+; CHECK: add_zf:
+;      CHECK: addl    (%rdi), %esi
+; CHECK-NEXT: movl    %edx, %eax
+; CHECK-NEXT: cmovnsl %ecx, %eax
+; CHECK-NEXT: ret
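+;
+; We do not want a redundant test of the sum, e.g. (illustrative only,
+; not checked by this test):
+;	addl	(%rdi), %esi
+;	testl	%esi, %esi
+;	...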
+
+define i32 @add_zf(i32* %x, i32 %y, i32 %a, i32 %b) nounwind {
+	%tmp2 = load i32* %x, align 4		; <i32> [#uses=1]
+	%tmp4 = add i32 %tmp2, %y		; <i32> [#uses=1]
+	%tmp5 = icmp slt i32 %tmp4, 0		; <i1> [#uses=1]
+	%tmp.0 = select i1 %tmp5, i32 %a, i32 %b		; <i32> [#uses=1]
+	ret i32 %tmp.0
+}
+
+declare void @foo(i32)
+
+; Don't use the flags result of the and here, since the and has no
+; other use. A simple test is better.
+
+; CHECK: bar:
+; CHECK: testb   $16, %dil
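+;
+; Illustrative contrast (not checked by this test): selecting the and
+; just for its flags, e.g.
+;	andl	$16, %edi
+; would clobber %edi, and %edi (i.e. %x) is still needed for the call
+; to @foo below.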
+
+define void @bar(i32 %x) nounwind {
+  %y = and i32 %x, 16
+  %t = icmp eq i32 %y, 0
+  br i1 %t, label %true, label %false
+true:
+  call void @foo(i32 %x)
+  ret void
+false:
+  ret void
+}
+
+; Do use the flags result of the and here, since the and has another use.
+
+; CHECK: qux:
+;      CHECK: andl    $16, %edi
+; CHECK-NEXT: jne     .LBB3_2
+
+define void @qux(i32 %x) nounwind {
+  %y = and i32 %x, 16
+  %t = icmp eq i32 %y, 0
+  br i1 %t, label %true, label %false
+true:
+  call void @foo(i32 %y)
+  ret void
+false:
+  ret void
+}