; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (srl 0, x) -> 0
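; 0 >> n == 0 for any shift amount n, so this is equivalent to returning a
; zero vector. SSE4.1 has no per-lane variable 32-bit shift, which is why the
; SSE output below expands the (unfolded) variable shift of a zeroed register.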
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: psrld %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsrlvd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srl x, c >= size(x)) -> undef
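; Shifting a 32-bit lane right by 33 is a shift by more than the element
; width, which yields undef/poison in IR, so the backend may return anything;
; here %x is simply passed through unchanged.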
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange0:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange0:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange1:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
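; x >> 0 == x, so the argument should be returned untouched on both targets.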
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_by_zero:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_by_zero:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
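; e.g. (%x & 15) >> 4 is always 0: the mask leaves only bits 0-3 set and the
; shift then discards exactly those bits. The checked output below still
; emits the mask and shift rather than a zero vector.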
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $11, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $9, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $10, %xmm1
; SSE-NEXT: psrld $8, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
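; e.g. for an i32 lane: (x >> 2) >> 4 == x >> 6, valid because 2 + 4 < 32.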
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE: # BB#0:
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX: # BB#0:
; AVX-NEXT: vpsrld $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $3, %xmm0
; SSE-NEXT: psrld $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $7, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $5, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: psrld $4, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
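; When c1 + c2 >= the element width the combined shift empties the lane,
; e.g. (x >> 16) >> 20 shifts an i32 by 36 bits in total, so the result is 0.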
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $20, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $18, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: psrld $17, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $26, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $27, %xmm1
; SSE-NEXT: psrld $25, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
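; e.g. for an i64 lane: trunc(x >> 32) >> 16 == trunc(x >> 48); the truncate
; keeps only the low 32 bits and 32 + 16 < 64, so the shifts can be merged
; before narrowing.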
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE: # BB#0:
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX: # BB#0:
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $33, %xmm2
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: psrlq $34, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $17, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $18, %xmm1
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
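; e.g. trunc(x >> 48) has at most 16 significant bits, so a further shift
; right by 24 clears the 32-bit lane entirely.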
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE: # BB#0:
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX: # BB#0:
; AVX-NEXT: vpsrlq $48, %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrld $24, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $49, %xmm2
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $51, %xmm2
; SSE-NEXT: psrlq $50, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $27, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $25, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $26, %xmm1
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
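; e.g. (x << 2) >> 2 == x & 0x3fffffff for an i32 lane: the round trip only
; clears the bits shifted out at the top, so it folds to a single mask.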
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX: # BB#0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE: # BB#0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $5, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $3, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $4, %xmm1
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,4,5]
; AVX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
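; An arithmetic shift never changes the sign bit, so extracting bit 31 of
; (sra X, Y) gives the same result as extracting bit 31 of X directly, and
; the ashr by %y can be dropped.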
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE: # BB#0:
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX: # BB#0:
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
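; In combine_vec_lshr_lzcnt_bit0 each lane of %1 is either 0 or 16, so ctlz
; is 32 or 27 and bit 5 of the count is set exactly when the lane was zero;
; the checked lowering reduces this to a mask, a shift by 4 and an xor that
; flips the low bit of each lane.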
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pshufb %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pshufb %xmm1, %xmm3
; SSE-NEXT: pcmpeqb %xmm2, %xmm1
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: paddb %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pcmpeqb %xmm2, %xmm3
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: paddw %xmm3, %xmm1
; SSE-NEXT: pcmpeqw %xmm2, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrld $5, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX: # BB#0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm4
; AVX-NEXT: vpand %xmm1, %xmm4, %xmm1
; AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
; AVX-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX-NEXT: vpshufb %xmm1, %xmm3, %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm1, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
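; The mask constants (15, 255, 4095, 65535) all fit in 32 bits, so masking
; before or after the truncate gives the same per-lane shift amounts;
; narrowing the and lets the amount computation happen on <4 x i32>.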
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE: # BB#0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld %xmm1, %xmm2
; SSE-NEXT: psrld %xmm3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_and:
; AVX: # BB#0:
; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}