; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2

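; PR15215: codegen for truncating <4 x i32> to a <4 x i1> mask. In the 'bad'
; form the mask is bitcast to i4, which lowers to a pslld/psrad shift pair
; that sign-extends bit 0 of each lane, then movmskps packs the sign bits
; into a scalar.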
define i32 @PR15215_bad(<4 x i32> %input) {
; X32-SSE2-LABEL: PR15215_bad:
; X32-SSE2: # BB#0: # %entry
; X32-SSE2-NEXT: pslld $31, %xmm0
; X32-SSE2-NEXT: psrad $31, %xmm0
; X32-SSE2-NEXT: movmskps %xmm0, %eax
; X32-SSE2-NEXT: retl
;
; X32-AVX2-LABEL: PR15215_bad:
; X32-AVX2: # BB#0: # %entry
; X32-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vmovmskps %xmm0, %eax
; X32-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: PR15215_bad:
; X64-SSE2: # BB#0: # %entry
; X64-SSE2-NEXT: pslld $31, %xmm0
; X64-SSE2-NEXT: psrad $31, %xmm0
; X64-SSE2-NEXT: movmskps %xmm0, %eax
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: PR15215_bad:
; X64-AVX2: # BB#0: # %entry
; X64-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovmskps %xmm0, %eax
; X64-AVX2-NEXT: retq
entry:
  %0 = trunc <4 x i32> %input to <4 x i1>
  %1 = bitcast <4 x i1> %0 to i4
  %2 = zext i4 %1 to i32
  ret i32 %2
}

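; The 'good' form builds the same 4-bit mask scalar-by-scalar: each i1 lane
; is extracted (pshufd+movd on SSE2, vpextrd on AVX2), masked with 'andl $1',
; and accumulated into bits 0-3 via scaled lea additions.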
define i32 @PR15215_good(<4 x i32> %input) {
; X32-SSE2-LABEL: PR15215_good:
; X32-SSE2: # BB#0: # %entry
; X32-SSE2-NEXT: pushl %esi
; X32-SSE2-NEXT: .Lcfi0:
; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
; X32-SSE2-NEXT: .Lcfi1:
; X32-SSE2-NEXT: .cfi_offset %esi, -8
; X32-SSE2-NEXT: movd %xmm0, %eax
; X32-SSE2-NEXT: andl $1, %eax
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; X32-SSE2-NEXT: movd %xmm1, %ecx
; X32-SSE2-NEXT: andl $1, %ecx
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X32-SSE2-NEXT: movd %xmm1, %edx
; X32-SSE2-NEXT: andl $1, %edx
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X32-SSE2-NEXT: movd %xmm0, %esi
; X32-SSE2-NEXT: andl $1, %esi
; X32-SSE2-NEXT: leal (%eax,%ecx,2), %eax
; X32-SSE2-NEXT: leal (%eax,%edx,4), %eax
; X32-SSE2-NEXT: leal (%eax,%esi,8), %eax
; X32-SSE2-NEXT: popl %esi
; X32-SSE2-NEXT: retl
;
; X32-AVX2-LABEL: PR15215_good:
; X32-AVX2: # BB#0: # %entry
; X32-AVX2-NEXT: pushl %esi
; X32-AVX2-NEXT: .Lcfi0:
; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
; X32-AVX2-NEXT: .Lcfi1:
; X32-AVX2-NEXT: .cfi_offset %esi, -8
; X32-AVX2-NEXT: vmovd %xmm0, %eax
; X32-AVX2-NEXT: andl $1, %eax
; X32-AVX2-NEXT: vpextrd $1, %xmm0, %ecx
; X32-AVX2-NEXT: andl $1, %ecx
; X32-AVX2-NEXT: vpextrd $2, %xmm0, %edx
; X32-AVX2-NEXT: andl $1, %edx
; X32-AVX2-NEXT: vpextrd $3, %xmm0, %esi
; X32-AVX2-NEXT: andl $1, %esi
; X32-AVX2-NEXT: leal (%eax,%ecx,2), %eax
; X32-AVX2-NEXT: leal (%eax,%edx,4), %eax
; X32-AVX2-NEXT: leal (%eax,%esi,8), %eax
; X32-AVX2-NEXT: popl %esi
; X32-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: PR15215_good:
; X64-SSE2: # BB#0: # %entry
; X64-SSE2-NEXT: movd %xmm0, %eax
; X64-SSE2-NEXT: andl $1, %eax
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; X64-SSE2-NEXT: movd %xmm1, %ecx
; X64-SSE2-NEXT: andl $1, %ecx
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movd %xmm1, %edx
; X64-SSE2-NEXT: andl $1, %edx
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X64-SSE2-NEXT: movd %xmm0, %esi
; X64-SSE2-NEXT: andl $1, %esi
; X64-SSE2-NEXT: leal (%rax,%rcx,2), %eax
; X64-SSE2-NEXT: leal (%rax,%rdx,4), %eax
; X64-SSE2-NEXT: leal (%rax,%rsi,8), %eax
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: PR15215_good:
; X64-AVX2: # BB#0: # %entry
; X64-AVX2-NEXT: vmovd %xmm0, %eax
; X64-AVX2-NEXT: andl $1, %eax
; X64-AVX2-NEXT: vpextrd $1, %xmm0, %ecx
; X64-AVX2-NEXT: andl $1, %ecx
; X64-AVX2-NEXT: vpextrd $2, %xmm0, %edx
; X64-AVX2-NEXT: andl $1, %edx
; X64-AVX2-NEXT: vpextrd $3, %xmm0, %esi
; X64-AVX2-NEXT: andl $1, %esi
; X64-AVX2-NEXT: leal (%rax,%rcx,2), %eax
; X64-AVX2-NEXT: leal (%rax,%rdx,4), %eax
; X64-AVX2-NEXT: leal (%rax,%rsi,8), %eax
; X64-AVX2-NEXT: retq
entry:
  %0 = trunc <4 x i32> %input to <4 x i1>
  %1 = extractelement <4 x i1> %0, i32 0
  %e1 = select i1 %1, i32 1, i32 0
  %2 = extractelement <4 x i1> %0, i32 1
  %e2 = select i1 %2, i32 2, i32 0
  %3 = extractelement <4 x i1> %0, i32 2
  %e3 = select i1 %3, i32 4, i32 0
  %4 = extractelement <4 x i1> %0, i32 3
  %e4 = select i1 %4, i32 8, i32 0
  %5 = or i32 %e1, %e2
  %6 = or i32 %5, %e3
  %7 = or i32 %6, %e4
  ret i32 %7
}