We lower setb to sbb in the hope that the 'and' will go away; when it
doesn't, match it back to setb.
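
In TableGen terms, the match back is roughly a pattern of this shape (a
sketch, not the verbatim patch; X86setcc_c, X86_COND_B and SETBr are the
existing carry-setcc node, condition leaf and setb instruction names):

// If the masking 'and' survived, fold (and (setcc_carry CF), 1) back to setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;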
On a 64-bit version of the testcase, we used to get:
movq %rdi, %rax
addq %rsi, %rax
sbbb %dl, %dl
andb $1, %dl
ret
now we get:
movq %rdi, %rax
addq %rsi, %rax
setb %dl
ret
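
A 64-bit IR reproducer in the spirit of the committed test (a sketch; the
test added below uses the i32 form of the intrinsic):

; The add result returns in %rax and the carry bit in %dl, matching the
; assembly above.
define {i64, i1} @test7_64(i64 %v1, i64 %v2) nounwind {
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
  ret {i64, i1} %t
}
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64)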
llvm-svn: 122217
diff --git a/llvm/test/CodeGen/X86/add.ll b/llvm/test/CodeGen/X86/add.ll
index 3991a68..1201c06 100644
--- a/llvm/test/CodeGen/X86/add.ll
+++ b/llvm/test/CodeGen/X86/add.ll
@@ -92,3 +92,12 @@
 ; X64: ret
 }
+define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ ret {i32, i1} %t
+}
+
+; X64: test7:
+; X64: addl %esi, %eax
+; X64-NEXT: setb %dl
+; X64-NEXT: ret
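
The X64 check prefix is driven by the file's existing RUN line, which is not
part of this hunk; assuming the usual form for this test, it would be:

; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64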