[DAGCombiner] Add saturating-add constant folding tests.
This exposes an issue with sadd_sat in computeOverflowKind, so I've disabled that fold for now.
llvm-svn: 351057
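
For reference, (uadd_sat x, y) clamps the wrapping unsigned sum to the all-ones value whenever the addition carries, so both new tests below should eventually fold to constants. A minimal hand-derived sketch of the results the fold would produce (worked out from the saturating-add semantics, not codegen output from this commit; the @expected_constfold_* function names are illustrative, not from the patch):

  ; uadd_sat(4294967295, 100) carries, so it saturates to UINT32_MAX:
  define i32 @expected_constfold_i32() {
    ret i32 -1 ; i.e. 4294967295
  }

  ; The vector case folds per lane, clamping each i16 sum at 65535:
  define <8 x i16> @expected_constfold_v8i16() {
    ret <8 x i16> <i16 1, i16 65535, i16 256, i16 65535, i16 65535, i16 65535, i16 2, i16 65535>
  }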
diff --git a/llvm/test/CodeGen/X86/combine-add-usat.ll b/llvm/test/CodeGen/X86/combine-add-usat.ll
index 26fe088..d7ce71e 100644
--- a/llvm/test/CodeGen/X86/combine-add-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-usat.ll
@@ -11,6 +11,35 @@
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+; fold (uadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %ecx
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: addl $100, %eax
+; CHECK-NEXT: cmovbl %ecx, %eax
+; CHECK-NEXT: retq
+ %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
+ ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT: paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT: vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+ ret <8 x i16> %res
+}
+
; fold (uadd_sat c, x) -> (uadd_sat x, c)
define i32 @combine_constant_i32(i32 %a0) {
; CHECK-LABEL: combine_constant_i32:
@@ -19,7 +48,7 @@
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: cmovael %edi, %eax
; CHECK-NEXT: retq
- %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0);
+ %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
ret i32 %1
}
@@ -33,7 +62,7 @@
; AVX: # %bb.0:
; AVX-NEXT: vpaddusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
- %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0);
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
ret <8 x i16> %1
}
@@ -43,7 +72,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
- %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0);
+ %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
ret i32 %1
}
@@ -51,7 +80,7 @@
; CHECK-LABEL: combine_zero_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
- %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer);
+ %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
ret <8 x i16> %1
}
@@ -67,7 +96,7 @@
; CHECK-NEXT: retq
%1 = lshr i32 %a0, 16
%2 = lshr i32 %a1, 16
- %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2);
+ %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
ret i32 %3
}
@@ -87,6 +116,6 @@
; AVX-NEXT: retq
%1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
%2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
- %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2);
+ %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
ret <8 x i16> %3
}