[X86][AArch64][DAGCombine] Unfold 'check for [no] signed truncation' pattern

Summary:

[[ https://bugs.llvm.org/show_bug.cgi?id=38149 | PR38149 ]]

As discussed in https://reviews.llvm.org/D49179#1158957 and later,
the IR for the 'check for [no] signed truncation' pattern can be improved:
https://rise4fun.com/Alive/gBf
^ That pattern will be produced by the Implicit Integer Truncation sanitizer
(https://reviews.llvm.org/D48958, https://bugs.llvm.org/show_bug.cgi?id=21530)
in the signed case, so it is probably a good idea to improve it.

But the IR-optimal pattern does not lower efficiently, so we want to undo it in DAGCombine.

This handles the simple pattern (sketched below); there is a second pattern
with the predicate and constants inverted.
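
For reference, a rough sketch of the two equivalent forms for the i16/i8 case,
mirroring the add_ultcmp_i16_i8 test below (function and value names here are
illustrative only, and the unfolded form is my reading of the pattern in the
Alive link, not copied from it): the compact add+icmp form that IR
canonicalization prefers, and the sign-extend-and-compare form that this
combine recreates because it lowers better.

  ; Compact IR form: x fits in i8 iff (x + 128) u< 256.
  define i1 @fits_in_i8_compact(i16 %x) {
    %t0 = add i16 %x, 128
    %t1 = icmp ult i16 %t0, 256
    ret i1 %t1
  }

  ; Unfolded form: x fits in i8 iff sign-extending its low 8 bits gives x back.
  ; This is what the new movsbl + cmp + sete lowering in the diff implements.
  define i1 @fits_in_i8_unfolded(i16 %x) {
    %lo = trunc i16 %x to i8
    %ext = sext i8 %lo to i16
    %eq = icmp eq i16 %ext, %x
    ret i1 %eq
  }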

NOTE: we do not check uses here; we always do the transform.

Reviewers: spatel, craig.topper, RKSimon, javed.absar

Reviewed By: spatel

Subscribers: kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D49266

llvm-svn: 337166
diff --git a/llvm/test/CodeGen/X86/signed-truncation-check.ll b/llvm/test/CodeGen/X86/signed-truncation-check.ll
index 91cdef2..f18307f 100644
--- a/llvm/test/CodeGen/X86/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/X86/signed-truncation-check.ll
@@ -299,19 +299,17 @@
 define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
 ; X86-LABEL: add_ultcmp_i16_i8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $128, %eax
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzwl %ax, %eax
-; X86-NEXT:    cmpl $256, %eax # imm = 0x100
-; X86-NEXT:    setb %al
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i16_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    subl $-128, %edi
-; X64-NEXT:    movzwl %di, %eax
-; X64-NEXT:    cmpl $256, %eax # imm = 0x100
-; X64-NEXT:    setb %al
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpw %di, %ax
+; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
@@ -321,17 +319,17 @@
 define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
 ; X86-LABEL: add_ultcmp_i32_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $32768, %eax # imm = 0x8000
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    cmpl $65536, %eax # imm = 0x10000
-; X86-NEXT:    setb %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i32_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    addl $32768, %edi # imm = 0x8000
-; X64-NEXT:    cmpl $65536, %edi # imm = 0x10000
-; X64-NEXT:    setb %al
+; X64-NEXT:    movswl %di, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
   %tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
@@ -341,17 +339,17 @@
 define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
 ; X86-LABEL: add_ultcmp_i32_i8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $128, %eax
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    cmpl $256, %eax # imm = 0x100
-; X86-NEXT:    setb %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i32_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    subl $-128, %edi
-; X64-NEXT:    cmpl $256, %edi # imm = 0x100
-; X64-NEXT:    setb %al
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i32 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
@@ -362,16 +360,15 @@
 ; X86-LABEL: add_ultcmp_i64_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i64_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    subq $-2147483648, %rdi # imm = 0x80000000
-; X64-NEXT:    shrq $32, %rdi
+; X64-NEXT:    movslq %edi, %rax
+; X64-NEXT:    cmpq %rdi, %rax
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
@@ -383,19 +380,19 @@
 ; X86-LABEL: add_ultcmp_i64_i16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $32768, %ecx # imm = 0x8000
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    adcl $0, %eax
-; X86-NEXT:    cmpl $65536, %ecx # imm = 0x10000
-; X86-NEXT:    sbbl $0, %eax
-; X86-NEXT:    setb %al
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    sarl $31, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i64_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    addq $32768, %rdi # imm = 0x8000
-; X64-NEXT:    cmpq $65536, %rdi # imm = 0x10000
-; X64-NEXT:    setb %al
+; X64-NEXT:    movswq %di, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
   %tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
@@ -406,19 +403,19 @@
 ; X86-LABEL: add_ultcmp_i64_i8:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $128, %ecx
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    adcl $0, %eax
-; X86-NEXT:    cmpl $256, %ecx # imm = 0x100
-; X86-NEXT:    sbbl $0, %eax
-; X86-NEXT:    setb %al
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    sarl $31, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_ultcmp_i64_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    subq $-128, %rdi
-; X64-NEXT:    cmpq $256, %rdi # imm = 0x100
-; X64-NEXT:    setb %al
+; X64-NEXT:    movsbq %dil, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %tmp0 = add i64 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8