; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (sdiv undef, x) -> 0
define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef0:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_undef0:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> undef, %x
  ret <4 x i32> %1
}

; fold (sdiv x, undef) -> undef
define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef1:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_undef1:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, undef
  ret <4 x i32> %1
}

; fold (sdiv x, 1) -> x
define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_one:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_one:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (sdiv x, -1) -> 0 - x
define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_negone:
; SSE:       # BB#0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_negone:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive
define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos0:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    psrld $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos0:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    pextrd $1, %xmm0, %eax
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $30, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $2, %ecx
; SSE-NEXT:    pextrd $2, %xmm0, %eax
; SSE-NEXT:    pextrd $3, %xmm0, %edx
; SSE-NEXT:    pinsrd $1, %ecx, %xmm0
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $29, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $3, %ecx
; SSE-NEXT:    pinsrd $2, %ecx, %xmm0
; SSE-NEXT:    movl %edx, %eax
; SSE-NEXT:    sarl $31, %eax
; SSE-NEXT:    shrl $28, %eax
; SSE-NEXT:    addl %edx, %eax
; SSE-NEXT:    sarl $4, %eax
; SSE-NEXT:    pinsrd $3, %eax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos1:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpextrd $1, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $30, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $2, %ecx
; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm1
; AVX-NEXT:    vpextrd $2, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $29, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $3, %ecx
; AVX-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $28, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $4, %ecx
; AVX-NEXT:    vpinsrd $3, %ecx, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %2
}

; fold (sdiv x, (1 << c)) -> x >>u c
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrld $30, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    psrad $2, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b:
; SSE:       # BB#0:
; SSE-NEXT:    pextrd $1, %xmm0, %eax
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $30, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $2, %ecx
; SSE-NEXT:    pextrd $2, %xmm0, %eax
; SSE-NEXT:    pextrd $3, %xmm0, %edx
; SSE-NEXT:    pinsrd $1, %ecx, %xmm0
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $29, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $3, %ecx
; SSE-NEXT:    pinsrd $2, %ecx, %xmm0
; SSE-NEXT:    movl %edx, %eax
; SSE-NEXT:    sarl $31, %eax
; SSE-NEXT:    shrl $28, %eax
; SSE-NEXT:    addl %edx, %eax
; SSE-NEXT:    sarl $4, %eax
; SSE-NEXT:    pinsrd $3, %eax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2b:
; AVX:       # BB#0:
; AVX-NEXT:    vpextrd $1, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $30, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $2, %ecx
; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm1
; AVX-NEXT:    vpextrd $2, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $29, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $3, %ecx
; AVX-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $28, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $4, %ecx
; AVX-NEXT:    vpinsrd $3, %ecx, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %1
}