; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
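; Note: @llvm.uadd.sat computes a + b, clamping to the all-ones value of the
; type on unsigned overflow, e.g. uadd.sat.i32(0xFFFFFFFF, 1) == 0xFFFFFFFF.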
; fold (uadd_sat c, x) -> (uadd_sat x, c)
define i32 @combine_constant_i32(i32 %a0) {
; CHECK-LABEL: combine_constant_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $1, %edi
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    cmovael %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
  ret i32 %1
}
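; The scalar expansion above saturates via EFLAGS: addl sets the carry flag on
; unsigned overflow, and cmovael (cmov if above-or-equal, i.e. no carry) keeps
; the sum, otherwise the preloaded -1 (UINT_MAX) is returned.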
define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_constant_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constant_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
  ret <8 x i16> %1
}
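; A minimal sketch of the expected commute (value names hypothetical):
;   %r = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <constant>, <8 x i16> %x)
; should canonicalize to
;   %r = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> <constant>)
; so the backend only has to match the constant on the RHS, where it can be
; folded into paddusw's memory operand as seen above.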
; fold (uadd_sat x, 0) -> x
define i32 @combine_zero_i32(i32 %a0) {
; CHECK-LABEL: combine_zero_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $0, %edi
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    cmovael %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
  ret i32 %1
}
define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_zero_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    paddusw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_zero_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
  ret <8 x i16> %1
}
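; Adding zero can never overflow, so uadd_sat(x, 0) is just x. The addl $0 and
; pxor+paddusw sequences above show the identity is not yet folded away; the
; CHECK lines pin the current codegen until that combine lands.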
; fold (uadd_sat x, y) -> (add x, y) iff no overflow
define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
; CHECK-LABEL: combine_no_overflow_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    shrl $16, %edi
; CHECK-NEXT:    shrl $16, %esi
; CHECK-NEXT:    addl %edi, %esi
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    retq
  %1 = lshr i32 %a0, 16
  %2 = lshr i32 %a1, 16
  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
  ret i32 %3
}
define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_no_overflow_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $10, %xmm0
; SSE-NEXT:    psrlw $10, %xmm1
; SSE-NEXT:    paddusw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_no_overflow_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
; AVX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
  ret <8 x i16> %3
}
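; Why overflow is impossible here: after lshr by 16 each i32 operand is at most
; 0xFFFF, so the sum is at most 0x1FFFE, well below 2^32; after lshr by 10 each
; i16 lane is at most 0x3F, so the per-lane sum is at most 0x7E. The saturating
; add can therefore be relaxed to a plain add/paddw.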