Now that addc/adde are gone, "ADDC" in the X86 backend uses EFLAGS results,
the same as setcc. Optimize ADDC(0,0,FLAGS) -> SET_CARRY(FLAGS). This is
a step towards finishing off PR5443. In the testcase in that bug we now get
(a small standalone sketch of why the two sequences agree follows them):
    movq %rdi, %rax
    addq %rsi, %rax
    sbbq %rcx, %rcx
    testb $1, %cl
    setne %dl
    ret
instead of:
    movq %rdi, %rax
    addq %rsi, %rax
    movl $0, %ecx
    adcq $0, %rcx
    testq %rcx, %rcx
    setne %dl
    ret
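
For illustration only, not part of the patch: a standalone C++ sketch that
simulates both sequences above on a few inputs and checks that they compute
the same carry bit. The test inputs and variable names are made up for this
example.

    // Standalone sketch: compare the new (sbb-based) and old (adc-based)
    // ways of materializing the carry-out of a 64-bit add.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t tests[][2] = {
          {0, 0}, {1, ~0ULL}, {~0ULL, ~0ULL}, {1ULL << 63, 1ULL << 63}};
      for (const auto &t : tests) {
        uint64_t a = t[0], b = t[1];
        uint64_t sum = a + b;       // addq %rsi, %rax
        unsigned carry = sum < a;   // carry-out (CF) of the add

        // New sequence: sbbq %rcx, %rcx leaves 0 or all-ones in %rcx
        // depending on CF; testb $1, %cl / setne %dl reads the low bit.
        uint64_t sbb = carry ? ~0ULL : 0;
        bool newRes = (sbb & 1) != 0;

        // Old sequence: movl $0, %ecx; adcq $0, %rcx puts CF in %rcx;
        // testq %rcx, %rcx / setne %dl checks for nonzero.
        uint64_t adc = 0 + 0 + carry;
        bool oldRes = adc != 0;

        printf("a=%016llx b=%016llx carry=%u new=%d old=%d\n",
               (unsigned long long)a, (unsigned long long)b, carry,
               (int)newRes, (int)oldRes);
      }
      return 0;
    }

The key fact is that sbbq %rcx, %rcx after an add leaves 0 or all-ones in
%rcx depending on CF, so its low bit is exactly the carry that the old
movl/adcq pair reconstructed, with one instruction and no immediate.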
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122219 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 1201c06..d6e99ee 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -101,3 +101,22 @@
; X64: addl %esi, %eax
; X64-NEXT: setb %dl
; X64-NEXT: ret
+
+; PR5443
+define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
+entry:
+ %extleft = zext i64 %left to i65
+ %extright = zext i64 %right to i65
+ %sum = add i65 %extleft, %extright
+ %res.0 = trunc i65 %sum to i64
+ %overflow = and i65 %sum, -18446744073709551616
+ %res.1 = icmp ne i65 %overflow, 0
+ %final0 = insertvalue {i64, i1} undef, i64 %res.0, 0
+ %final1 = insertvalue {i64, i1} %final0, i1 %res.1, 1
+ ret {i64, i1} %final1
+}
+
+; X64: test8:
+; X64: addq
+; X64-NEXT: sbbq
+; X64-NEXT: testb
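
Aside, not part of the patch: the mask -18446744073709551616 in test8 is
-2^64, which in 65-bit two's complement has only bit 64 set, so %res.1 is
exactly the carry-out of the 64-bit add. A small C++ sketch of the same
pattern using the unsigned __int128 GCC/Clang extension as a stand-in for
i65 (the inputs are chosen arbitrarily):

    // Sketch of the i65 overflow pattern from test8 via unsigned __int128.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t left = ~0ULL, right = 1;                       // %left, %right
      unsigned __int128 sum =
          (unsigned __int128)left + (unsigned __int128)right; // add i65
      uint64_t res0 = (uint64_t)sum;                          // trunc to i64
      bool res1 = ((sum >> 64) & 1) != 0;                     // bit 64 = carry
      printf("res.0=%016llx res.1=%d\n", (unsigned long long)res0, (int)res1);
      return 0;
    }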