; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL

;
; vXi64
;
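
; Explanatory note (not part of the autogenerated checks): pre-AVX512 x86 has
; no unsigned vector compare and no unsigned i64 min, so the lowerings below
; flip the sign bits of both operands with PXOR and then use a signed
; greater-than, relying on the identity
;   icmp ult %a, %b  ==  icmp slt (%a ^ SIGN_BIT), (%b ^ SIGN_BIT)
; The resulting mask drives a PAND/PANDN/POR select on SSE2 or a BLENDVPD on
; SSE4.1/AVX, while the AVX512 paths can use VPMINUQ directly.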

define i64 @test_v2i64(<2 x i64> %a0) {
; SSE2-LABEL: test_v2i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm3
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pandn %xmm1, %xmm3
; SSE2-NEXT:    por %xmm0, %xmm3
; SSE2-NEXT:    movq %xmm3, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    movdqa %xmm1, %xmm3
; SSE41-NEXT:    pxor %xmm0, %xmm3
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm5, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT:    por %xmm3, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT:    movq %xmm2, %rax
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm3
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    retq
;
; AVX512BW-LABEL: test_v2i64:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vmovq %xmm0, %rax
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512VL-LABEL: test_v2i64:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT:    vmovq %xmm0, %rax
; AVX512VL-NEXT:    retq
  %1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64> %a0)
  ret i64 %1
}
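
; Note on the SSE2/SSE41 sequences above (explanatory, not autogenerated):
; without PCMPGTQ, a 64-bit unsigned greater-than is synthesized from 32-bit
; ops. The [2147483648 x 4] constant flips bit 31 of both dwords of each
; element; PCMPGTD then compares both halves signed, and the PSHUFD/PAND/POR
; combine computes
;   hi_gt | (hi_eq & lo_gt)
; broadcast across the full 64-bit lane, which becomes the select mask.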

define i64 @test_v4i64(<4 x i64> %a0) {
; SSE2-LABEL: test_v4i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    pxor %xmm2, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    por %xmm0, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
; SSE2-NEXT:    movdqa %xmm4, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm4
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm4, %xmm2
; SSE2-NEXT:    movq %xmm2, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm3, %xmm0
; SSE41-NEXT:    movdqa %xmm1, %xmm4
; SSE41-NEXT:    pxor %xmm3, %xmm4
; SSE41-NEXT:    movdqa %xmm4, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT:    pand %xmm6, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
; SSE41-NEXT:    por %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm3, %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm3
; SSE41-NEXT:    movdqa %xmm3, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE41-NEXT:    pand %xmm5, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT:    por %xmm3, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT:    movq %xmm2, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpxor %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm4
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm4, %ymm3
; AVX2-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vxorpd %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vxorpd %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512BW-LABEL: test_v4i64:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vmovq %xmm0, %rax
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512VL-LABEL: test_v4i64:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT:    vmovq %xmm0, %rax
; AVX512VL-NEXT:    vzeroupper
; AVX512VL-NEXT:    retq
  %1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v4i64(<4 x i64> %a0)
  ret i64 %1
}
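
; Note (explanatory, not autogenerated): without AVX512VL, VPMINUQ is only
; legal on 512-bit vectors, so the AVX512BW path widens the xmm/ymm inputs to
; zmm (see the "kill" annotations) and needs a trailing VZEROUPPER; with
; AVX512VL the 128/256-bit forms are used directly.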

define i64 @test_v8i64(<8 x i64> %a0) {
; SSE2-LABEL: test_v8i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm1, %xmm5
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm3, %xmm6
; SSE2-NEXT:    pxor %xmm4, %xmm6
; SSE2-NEXT:    movdqa %xmm6, %xmm7
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm7
; SSE2-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT:    pand %xmm8, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE2-NEXT:    por %xmm6, %xmm5
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    pandn %xmm3, %xmm5
; SSE2-NEXT:    por %xmm1, %xmm5
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT:    pand %xmm7, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pandn %xmm2, %xmm3
; SSE2-NEXT:    por %xmm0, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    movdqa %xmm5, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm0, %xmm1
; SSE2-NEXT:    pand %xmm1, %xmm3
; SSE2-NEXT:    pandn %xmm5, %xmm1
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm0, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm1
; SSE2-NEXT:    pandn %xmm0, %xmm3
; SSE2-NEXT:    por %xmm1, %xmm3
; SSE2-NEXT:    movq %xmm3, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm8
; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm5, %xmm0
; SSE41-NEXT:    movdqa %xmm3, %xmm6
; SSE41-NEXT:    pxor %xmm5, %xmm6
; SSE41-NEXT:    movdqa %xmm6, %xmm7
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm6
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT:    por %xmm6, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT:    movdqa %xmm8, %xmm0
; SSE41-NEXT:    pxor %xmm5, %xmm0
; SSE41-NEXT:    movdqa %xmm2, %xmm1
; SSE41-NEXT:    pxor %xmm5, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm6, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT:    movapd %xmm2, %xmm0
; SSE41-NEXT:    xorpd %xmm5, %xmm0
; SSE41-NEXT:    movapd %xmm3, %xmm1
; SSE41-NEXT:    xorpd %xmm5, %xmm1
; SSE41-NEXT:    movapd %xmm1, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm6, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    pxor %xmm5, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm5
; SSE41-NEXT:    movdqa %xmm5, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm5, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v8i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v8i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm4
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm4, %ymm3
; AVX2-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vxorpd %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vxorpd %ymm2, %ymm1, %ymm4
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm4, %ymm3
; AVX2-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vxorpd %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vxorpd %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v8i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v8i64(<8 x i64> %a0)
  ret i64 %1
}

define i64 @test_v16i64(<16 x i64> %a0) {
; SSE2-LABEL: test_v16i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm2, %xmm9
; SSE2-NEXT:    pxor %xmm8, %xmm9
; SSE2-NEXT:    movdqa %xmm6, %xmm10
; SSE2-NEXT:    pxor %xmm8, %xmm10
; SSE2-NEXT:    movdqa %xmm10, %xmm11
; SSE2-NEXT:    pcmpgtd %xmm9, %xmm11
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm9, %xmm10
; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE2-NEXT:    pand %xmm12, %xmm10
; SSE2-NEXT:    pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
; SSE2-NEXT:    por %xmm10, %xmm9
; SSE2-NEXT:    pand %xmm9, %xmm2
; SSE2-NEXT:    pandn %xmm6, %xmm9
; SSE2-NEXT:    por %xmm2, %xmm9
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    movdqa %xmm4, %xmm6
; SSE2-NEXT:    pxor %xmm8, %xmm6
; SSE2-NEXT:    movdqa %xmm6, %xmm10
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm10
; SSE2-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT:    pand %xmm11, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm10[1,1,3,3]
; SSE2-NEXT:    por %xmm6, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn %xmm4, %xmm2
; SSE2-NEXT:    por %xmm0, %xmm2
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm10, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE2-NEXT:    por %xmm4, %xmm0
; SSE2-NEXT:    pand %xmm0, %xmm3
; SSE2-NEXT:    pandn %xmm7, %xmm0
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    movdqa %xmm5, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm7, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    pandn %xmm5, %xmm4
; SSE2-NEXT:    por %xmm1, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    pand %xmm1, %xmm4
; SSE2-NEXT:    pandn %xmm0, %xmm1
; SSE2-NEXT:    por %xmm4, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm9, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm0, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    pandn %xmm9, %xmm3
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm0, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm3
; SSE2-NEXT:    pandn %xmm1, %xmm2
; SSE2-NEXT:    por %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT:    movdqa %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm8
; SSE2-NEXT:    movdqa %xmm8, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm8
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    pandn %xmm0, %xmm3
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    movq %xmm3, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm8
; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    movdqa %xmm2, %xmm10
; SSE41-NEXT:    pxor %xmm9, %xmm10
; SSE41-NEXT:    movdqa %xmm6, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm11
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm12, %xmm10
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
; SSE41-NEXT:    por %xmm10, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
; SSE41-NEXT:    movdqa %xmm8, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    movdqa %xmm4, %xmm2
; SSE41-NEXT:    pxor %xmm9, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm10
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT:    pand %xmm11, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    movdqa %xmm7, %xmm2
; SSE41-NEXT:    pxor %xmm9, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm8
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm8
; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT:    pand %xmm10, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,3,3]
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    movdqa %xmm5, %xmm2
; SSE41-NEXT:    pxor %xmm9, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT:    pand %xmm8, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
; SSE41-NEXT:    movapd %xmm5, %xmm0
; SSE41-NEXT:    xorpd %xmm9, %xmm0
; SSE41-NEXT:    movapd %xmm7, %xmm1
; SSE41-NEXT:    xorpd %xmm9, %xmm1
; SSE41-NEXT:    movapd %xmm1, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
; SSE41-NEXT:    movapd %xmm4, %xmm0
; SSE41-NEXT:    xorpd %xmm9, %xmm0
; SSE41-NEXT:    movapd %xmm6, %xmm1
; SSE41-NEXT:    xorpd %xmm9, %xmm1
; SSE41-NEXT:    movapd %xmm1, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
; SSE41-NEXT:    movapd %xmm6, %xmm0
; SSE41-NEXT:    xorpd %xmm9, %xmm0
; SSE41-NEXT:    movapd %xmm7, %xmm1
; SSE41-NEXT:    xorpd %xmm9, %xmm1
; SSE41-NEXT:    movapd %xmm1, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
; SSE41-NEXT:    movdqa %xmm7, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm9
; SSE41-NEXT:    movdqa %xmm9, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v16i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm7
; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm5
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v16i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm6
; AVX2-NEXT:    vpcmpgtq %ymm5, %ymm6, %ymm5
; AVX2-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm3
; AVX2-NEXT:    vpxor %ymm4, %ymm2, %ymm5
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm5, %ymm3
; AVX2-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vxorpd %ymm4, %ymm0, %ymm2
; AVX2-NEXT:    vxorpd %ymm4, %ymm1, %ymm3
; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vxorpd %ymm4, %ymm0, %ymm2
; AVX2-NEXT:    vxorpd %ymm4, %ymm1, %ymm3
; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vxorpd %ymm4, %ymm0, %ymm2
; AVX2-NEXT:    vxorpd %ymm4, %ymm1, %ymm3
; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v16i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v16i64(<16 x i64> %a0)
  ret i64 %1
}

;
; vXi32
;
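
; Explanatory note (not autogenerated): an unsigned i32 minimum exists
; natively from SSE4.1 onwards (PMINUD), so only the SSE2 paths below need
; the sign-flip compare plus PAND/PANDN/POR select; the newer targets simply
; reduce with PMINUD across progressively narrower halves of the vector.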
|  | 752 |  | 
|  | 753 | define i32 @test_v4i32(<4 x i32> %a0) { | 
|  | 754 | ; SSE2-LABEL: test_v4i32: | 
|  | 755 | ; SSE2:       # %bb.0: | 
|  | 756 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 757 | ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] | 
|  | 758 | ; SSE2-NEXT:    movdqa %xmm0, %xmm3 | 
|  | 759 | ; SSE2-NEXT:    pxor %xmm2, %xmm3 | 
|  | 760 | ; SSE2-NEXT:    movdqa %xmm1, %xmm4 | 
|  | 761 | ; SSE2-NEXT:    pxor %xmm2, %xmm4 | 
|  | 762 | ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4 | 
|  | 763 | ; SSE2-NEXT:    pand %xmm4, %xmm0 | 
|  | 764 | ; SSE2-NEXT:    pandn %xmm1, %xmm4 | 
|  | 765 | ; SSE2-NEXT:    por %xmm0, %xmm4 | 
|  | 766 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3] | 
|  | 767 | ; SSE2-NEXT:    movdqa %xmm4, %xmm1 | 
|  | 768 | ; SSE2-NEXT:    pxor %xmm2, %xmm1 | 
|  | 769 | ; SSE2-NEXT:    pxor %xmm0, %xmm2 | 
|  | 770 | ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2 | 
|  | 771 | ; SSE2-NEXT:    pand %xmm2, %xmm4 | 
|  | 772 | ; SSE2-NEXT:    pandn %xmm0, %xmm2 | 
|  | 773 | ; SSE2-NEXT:    por %xmm4, %xmm2 | 
|  | 774 | ; SSE2-NEXT:    movd %xmm2, %eax | 
|  | 775 | ; SSE2-NEXT:    retq | 
|  | 776 | ; | 
|  | 777 | ; SSE41-LABEL: test_v4i32: | 
|  | 778 | ; SSE41:       # %bb.0: | 
|  | 779 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 780 | ; SSE41-NEXT:    pminud %xmm0, %xmm1 | 
|  | 781 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] | 
|  | 782 | ; SSE41-NEXT:    pminud %xmm1, %xmm0 | 
|  | 783 | ; SSE41-NEXT:    movd %xmm0, %eax | 
|  | 784 | ; SSE41-NEXT:    retq | 
|  | 785 | ; | 
|  | 786 | ; AVX-LABEL: test_v4i32: | 
|  | 787 | ; AVX:       # %bb.0: | 
|  | 788 | ; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 789 | ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 790 | ; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 791 | ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 792 | ; AVX-NEXT:    vmovd %xmm0, %eax | 
|  | 793 | ; AVX-NEXT:    retq | 
|  | 794 | ; | 
|  | 795 | ; AVX512-LABEL: test_v4i32: | 
|  | 796 | ; AVX512:       # %bb.0: | 
|  | 797 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 798 | ; AVX512-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 799 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 800 | ; AVX512-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 801 | ; AVX512-NEXT:    vmovd %xmm0, %eax | 
|  | 802 | ; AVX512-NEXT:    retq | 
|  | 803 | %1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v4i32(<4 x i32> %a0) | 
|  | 804 | ret i32 %1 | 
|  | 805 | } | 
|  | 806 |  | 
|  | 807 | define i32 @test_v8i32(<8 x i32> %a0) { | 
|  | 808 | ; SSE2-LABEL: test_v8i32: | 
|  | 809 | ; SSE2:       # %bb.0: | 
|  | 810 | ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] | 
|  | 811 | ; SSE2-NEXT:    movdqa %xmm0, %xmm3 | 
|  | 812 | ; SSE2-NEXT:    pxor %xmm2, %xmm3 | 
|  | 813 | ; SSE2-NEXT:    movdqa %xmm1, %xmm4 | 
|  | 814 | ; SSE2-NEXT:    pxor %xmm2, %xmm4 | 
|  | 815 | ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4 | 
|  | 816 | ; SSE2-NEXT:    pand %xmm4, %xmm0 | 
|  | 817 | ; SSE2-NEXT:    pandn %xmm1, %xmm4 | 
|  | 818 | ; SSE2-NEXT:    por %xmm0, %xmm4 | 
|  | 819 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1] | 
|  | 820 | ; SSE2-NEXT:    movdqa %xmm4, %xmm1 | 
|  | 821 | ; SSE2-NEXT:    pxor %xmm2, %xmm1 | 
|  | 822 | ; SSE2-NEXT:    movdqa %xmm0, %xmm3 | 
|  | 823 | ; SSE2-NEXT:    pxor %xmm2, %xmm3 | 
|  | 824 | ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3 | 
|  | 825 | ; SSE2-NEXT:    pand %xmm3, %xmm4 | 
|  | 826 | ; SSE2-NEXT:    pandn %xmm0, %xmm3 | 
|  | 827 | ; SSE2-NEXT:    por %xmm4, %xmm3 | 
|  | 828 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3] | 
|  | 829 | ; SSE2-NEXT:    movdqa %xmm3, %xmm1 | 
|  | 830 | ; SSE2-NEXT:    pxor %xmm2, %xmm1 | 
|  | 831 | ; SSE2-NEXT:    pxor %xmm0, %xmm2 | 
|  | 832 | ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2 | 
|  | 833 | ; SSE2-NEXT:    pand %xmm2, %xmm3 | 
|  | 834 | ; SSE2-NEXT:    pandn %xmm0, %xmm2 | 
|  | 835 | ; SSE2-NEXT:    por %xmm3, %xmm2 | 
|  | 836 | ; SSE2-NEXT:    movd %xmm2, %eax | 
|  | 837 | ; SSE2-NEXT:    retq | 
|  | 838 | ; | 
|  | 839 | ; SSE41-LABEL: test_v8i32: | 
|  | 840 | ; SSE41:       # %bb.0: | 
|  | 841 | ; SSE41-NEXT:    pminud %xmm1, %xmm0 | 
|  | 842 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 843 | ; SSE41-NEXT:    pminud %xmm0, %xmm1 | 
|  | 844 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] | 
|  | 845 | ; SSE41-NEXT:    pminud %xmm1, %xmm0 | 
|  | 846 | ; SSE41-NEXT:    movd %xmm0, %eax | 
|  | 847 | ; SSE41-NEXT:    retq | 
|  | 848 | ; | 
|  | 849 | ; AVX1-LABEL: test_v8i32: | 
|  | 850 | ; AVX1:       # %bb.0: | 
|  | 851 | ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1 | 
|  | 852 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 853 | ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 854 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 855 | ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 856 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 857 | ; AVX1-NEXT:    vmovd %xmm0, %eax | 
|  | 858 | ; AVX1-NEXT:    vzeroupper | 
|  | 859 | ; AVX1-NEXT:    retq | 
|  | 860 | ; | 
|  | 861 | ; AVX2-LABEL: test_v8i32: | 
|  | 862 | ; AVX2:       # %bb.0: | 
|  | 863 | ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1 | 
|  | 864 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 865 | ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 866 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 867 | ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 868 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 869 | ; AVX2-NEXT:    vmovd %xmm0, %eax | 
|  | 870 | ; AVX2-NEXT:    vzeroupper | 
|  | 871 | ; AVX2-NEXT:    retq | 
|  | 872 | ; | 
|  | 873 | ; AVX512-LABEL: test_v8i32: | 
|  | 874 | ; AVX512:       # %bb.0: | 
|  | 875 | ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1 | 
|  | 876 | ; AVX512-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 877 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 878 | ; AVX512-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 879 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 880 | ; AVX512-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 881 | ; AVX512-NEXT:    vmovd %xmm0, %eax | 
|  | 882 | ; AVX512-NEXT:    vzeroupper | 
|  | 883 | ; AVX512-NEXT:    retq | 
|  | 884 | %1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v8i32(<8 x i32> %a0) | 
|  | 885 | ret i32 %1 | 
|  | 886 | } | 
|  | 887 |  | 
|  | 888 | define i32 @test_v16i32(<16 x i32> %a0) { | 
|  | 889 | ; SSE2-LABEL: test_v16i32: | 
|  | 890 | ; SSE2:       # %bb.0: | 
|  | 891 | ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] | 
|  | 892 | ; SSE2-NEXT:    movdqa %xmm1, %xmm5 | 
|  | 893 | ; SSE2-NEXT:    pxor %xmm4, %xmm5 | 
|  | 894 | ; SSE2-NEXT:    movdqa %xmm3, %xmm6 | 
|  | 895 | ; SSE2-NEXT:    pxor %xmm4, %xmm6 | 
|  | 896 | ; SSE2-NEXT:    pcmpgtd %xmm5, %xmm6 | 
|  | 897 | ; SSE2-NEXT:    pand %xmm6, %xmm1 | 
|  | 898 | ; SSE2-NEXT:    pandn %xmm3, %xmm6 | 
|  | 899 | ; SSE2-NEXT:    por %xmm1, %xmm6 | 
|  | 900 | ; SSE2-NEXT:    movdqa %xmm0, %xmm1 | 
|  | 901 | ; SSE2-NEXT:    pxor %xmm4, %xmm1 | 
|  | 902 | ; SSE2-NEXT:    movdqa %xmm2, %xmm3 | 
|  | 903 | ; SSE2-NEXT:    pxor %xmm4, %xmm3 | 
|  | 904 | ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3 | 
|  | 905 | ; SSE2-NEXT:    pand %xmm3, %xmm0 | 
|  | 906 | ; SSE2-NEXT:    pandn %xmm2, %xmm3 | 
|  | 907 | ; SSE2-NEXT:    por %xmm0, %xmm3 | 
|  | 908 | ; SSE2-NEXT:    movdqa %xmm3, %xmm0 | 
|  | 909 | ; SSE2-NEXT:    pxor %xmm4, %xmm0 | 
|  | 910 | ; SSE2-NEXT:    movdqa %xmm6, %xmm1 | 
|  | 911 | ; SSE2-NEXT:    pxor %xmm4, %xmm1 | 
|  | 912 | ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1 | 
|  | 913 | ; SSE2-NEXT:    pand %xmm1, %xmm3 | 
|  | 914 | ; SSE2-NEXT:    pandn %xmm6, %xmm1 | 
|  | 915 | ; SSE2-NEXT:    por %xmm3, %xmm1 | 
|  | 916 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] | 
|  | 917 | ; SSE2-NEXT:    movdqa %xmm1, %xmm2 | 
|  | 918 | ; SSE2-NEXT:    pxor %xmm4, %xmm2 | 
|  | 919 | ; SSE2-NEXT:    movdqa %xmm0, %xmm3 | 
|  | 920 | ; SSE2-NEXT:    pxor %xmm4, %xmm3 | 
|  | 921 | ; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3 | 
|  | 922 | ; SSE2-NEXT:    pand %xmm3, %xmm1 | 
|  | 923 | ; SSE2-NEXT:    pandn %xmm0, %xmm3 | 
|  | 924 | ; SSE2-NEXT:    por %xmm1, %xmm3 | 
|  | 925 | ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3] | 
|  | 926 | ; SSE2-NEXT:    movdqa %xmm3, %xmm1 | 
|  | 927 | ; SSE2-NEXT:    pxor %xmm4, %xmm1 | 
|  | 928 | ; SSE2-NEXT:    pxor %xmm0, %xmm4 | 
|  | 929 | ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4 | 
|  | 930 | ; SSE2-NEXT:    pand %xmm4, %xmm3 | 
|  | 931 | ; SSE2-NEXT:    pandn %xmm0, %xmm4 | 
|  | 932 | ; SSE2-NEXT:    por %xmm3, %xmm4 | 
|  | 933 | ; SSE2-NEXT:    movd %xmm4, %eax | 
|  | 934 | ; SSE2-NEXT:    retq | 
|  | 935 | ; | 
|  | 936 | ; SSE41-LABEL: test_v16i32: | 
|  | 937 | ; SSE41:       # %bb.0: | 
|  | 938 | ; SSE41-NEXT:    pminud %xmm3, %xmm1 | 
|  | 939 | ; SSE41-NEXT:    pminud %xmm2, %xmm0 | 
|  | 940 | ; SSE41-NEXT:    pminud %xmm1, %xmm0 | 
|  | 941 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 942 | ; SSE41-NEXT:    pminud %xmm0, %xmm1 | 
|  | 943 | ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] | 
|  | 944 | ; SSE41-NEXT:    pminud %xmm1, %xmm0 | 
|  | 945 | ; SSE41-NEXT:    movd %xmm0, %eax | 
|  | 946 | ; SSE41-NEXT:    retq | 
|  | 947 | ; | 
|  | 948 | ; AVX1-LABEL: test_v16i32: | 
|  | 949 | ; AVX1:       # %bb.0: | 
|  | 950 | ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2 | 
|  | 951 | ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3 | 
|  | 952 | ; AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2 | 
|  | 953 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 954 | ; AVX1-NEXT:    vpminud %xmm2, %xmm0, %xmm0 | 
|  | 955 | ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 956 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 957 | ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 958 | ; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 | 
|  | 959 | ; AVX1-NEXT:    vmovd %xmm0, %eax | 
|  | 960 | ; AVX1-NEXT:    vzeroupper | 
|  | 961 | ; AVX1-NEXT:    retq | 
|  | 962 | ; | 
|  | 963 | ; AVX2-LABEL: test_v16i32: | 
|  | 964 | ; AVX2:       # %bb.0: | 
|  | 965 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 966 | ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1 | 
|  | 967 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 968 | ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 969 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 970 | ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 971 | ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 | 
|  | 972 | ; AVX2-NEXT:    vmovd %xmm0, %eax | 
|  | 973 | ; AVX2-NEXT:    vzeroupper | 
|  | 974 | ; AVX2-NEXT:    retq | 
|  | 975 | ; | 
|  | 976 | ; AVX512-LABEL: test_v16i32: | 
|  | 977 | ; AVX512:       # %bb.0: | 
|  | 978 | ; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1 | 
|  | 979 | ; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0 | 
|  | 980 | ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1 | 
|  | 981 | ; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0 | 
|  | 982 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] | 
|  | 983 | ; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0 | 
|  | 984 | ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] | 
|  | 985 | ; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0 | 
|  | 986 | ; AVX512-NEXT:    vmovd %xmm0, %eax | 
|  | 987 | ; AVX512-NEXT:    vzeroupper | 
|  | 988 | ; AVX512-NEXT:    retq | 
|  | 989 | %1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v16i32(<16 x i32> %a0) | 
|  | 990 | ret i32 %1 | 
|  | 991 | } | 
|  | 992 |  | 
|  | 993 | define i32 @test_v32i32(<32 x i32> %a0) { | 
|  | 994 | ; SSE2-LABEL: test_v32i32: | 
|  | 995 | ; SSE2:       # %bb.0: | 
|  | 996 | ; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648] | 
|  | 997 | ; SSE2-NEXT:    movdqa %xmm2, %xmm10 | 
|  | 998 | ; SSE2-NEXT:    pxor %xmm8, %xmm10 | 
|  | 999 | ; SSE2-NEXT:    movdqa %xmm6, %xmm9 | 
|  | 1000 | ; SSE2-NEXT:    pxor %xmm8, %xmm9 | 
|  | 1001 | ; SSE2-NEXT:    pcmpgtd %xmm10, %xmm9 | 
|  | 1002 | ; SSE2-NEXT:    pand %xmm9, %xmm2 | 
|  | 1003 | ; SSE2-NEXT:    pandn %xmm6, %xmm9 | 
|  | 1004 | ; SSE2-NEXT:    por %xmm2, %xmm9 | 
|  | 1005 | ; SSE2-NEXT:    movdqa %xmm0, %xmm6 | 
|  | 1006 | ; SSE2-NEXT:    pxor %xmm8, %xmm6 | 
|  | 1007 | ; SSE2-NEXT:    movdqa %xmm4, %xmm2 | 
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm6, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn %xmm4, %xmm2
; SSE2-NEXT:    por %xmm0, %xmm2
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE2-NEXT:    pand %xmm4, %xmm3
; SSE2-NEXT:    pandn %xmm7, %xmm4
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm5, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm1
; SSE2-NEXT:    pandn %xmm5, %xmm3
; SSE2-NEXT:    por %xmm1, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm4, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
; SSE2-NEXT:    pand %xmm1, %xmm3
; SSE2-NEXT:    pandn %xmm4, %xmm1
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm9, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    pandn %xmm9, %xmm3
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm3
; SSE2-NEXT:    pandn %xmm1, %xmm2
; SSE2-NEXT:    por %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT:    movdqa %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    pandn %xmm0, %xmm3
; SSE2-NEXT:    por %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
; SSE2-NEXT:    movdqa %xmm3, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm8
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm8
; SSE2-NEXT:    pand %xmm8, %xmm3
; SSE2-NEXT:    pandn %xmm0, %xmm8
; SSE2-NEXT:    por %xmm3, %xmm8
; SSE2-NEXT:    movd %xmm8, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v32i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminud %xmm6, %xmm2
; SSE41-NEXT:    pminud %xmm4, %xmm0
; SSE41-NEXT:    pminud %xmm2, %xmm0
; SSE41-NEXT:    pminud %xmm7, %xmm3
; SSE41-NEXT:    pminud %xmm5, %xmm1
; SSE41-NEXT:    pminud %xmm3, %xmm1
; SSE41-NEXT:    pminud %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT:    pminud %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT:    pminud %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v32i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpminud %xmm3, %xmm1, %xmm4
; AVX1-NEXT:    vpminud %xmm2, %xmm0, %xmm5
; AVX1-NEXT:    vpminud %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT:    vpminud %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpminud %xmm0, %xmm4, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v32i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpminud %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v32i32(<32 x i32> %a0)
  ret i32 %1
}
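
; Note: before SSE4.1 there is no unsigned i32 min instruction, so the SSE2
; lowering of test_v32i32 above synthesizes each unsigned compare by XORing
; both operands with the sign-bit constant (2147483648) and using the signed
; pcmpgtd, then selects the smaller elements with a pand/pandn/por sequence.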

;
; vXi16
;

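; SSE2 has no unsigned i16 min either, so the paths below flip the sign bit
; (pxor with 32768) to make the signed pminsw compute the unsigned minimum,
; undoing the bias around each step. From SSE4.1 onwards, phminposuw returns
; the horizontal unsigned-min word (and its index) of an xmm register, so the
; v8i16 reduction is a single instruction.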
define i16 @test_v8i16(<8 x i16> %a0) {
; SSE2-LABEL: test_v8i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    phminposuw %xmm0, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vphminposuw %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v8i16(<8 x i16> %a0)
  ret i16 %1
}
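
; The wider i16 reductions first narrow to a single 128-bit vector with
; pairwise mins (biased pminsw on SSE2, pminuw on SSE4.1+, with the 256/512-bit
; halves extracted first on AVX targets) and then finish as in test_v8i16.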

define i16 @test_v16i16(<16 x i16> %a0) {
; SSE2-LABEL: test_v16i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminuw %xmm1, %xmm0
; SSE41-NEXT:    phminposuw %xmm0, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v16i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v16i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v16i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v16i16(<16 x i16> %a0)
  ret i16 %1
}

define i16 @test_v32i16(<32 x i16> %a0) {
; SSE2-LABEL: test_v32i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pminsw %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    pminsw %xmm3, %xmm1
; SSE2-NEXT:    movdqa %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm2
; SSE2-NEXT:    pminsw %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pminsw %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v32i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminuw %xmm3, %xmm1
; SSE41-NEXT:    pminuw %xmm2, %xmm0
; SSE41-NEXT:    pminuw %xmm1, %xmm0
; SSE41-NEXT:    phminposuw %xmm0, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v32i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v32i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v32i16(<32 x i16> %a0)
  ret i16 %1
}

define i16 @test_v64i16(<64 x i16> %a0) {
; SSE2-LABEL: test_v64i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT:    pxor %xmm8, %xmm5
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pminsw %xmm5, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm7
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    pminsw %xmm7, %xmm3
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    pminsw %xmm4, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm6
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    pminsw %xmm6, %xmm2
; SSE2-NEXT:    movdqa %xmm8, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pminsw %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    pminsw %xmm3, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm1
; SSE2-NEXT:    pxor %xmm4, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm0
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    pminsw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm8, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v64i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminuw %xmm7, %xmm3
; SSE41-NEXT:    pminuw %xmm5, %xmm1
; SSE41-NEXT:    pminuw %xmm3, %xmm1
; SSE41-NEXT:    pminuw %xmm6, %xmm2
; SSE41-NEXT:    pminuw %xmm4, %xmm0
; SSE41-NEXT:    pminuw %xmm2, %xmm0
; SSE41-NEXT:    pminuw %xmm1, %xmm0
; SSE41-NEXT:    phminposuw %xmm0, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v64i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT:    vpminuw %xmm5, %xmm6, %xmm5
; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpminuw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpminuw %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v64i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v64i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminuw %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v64i16(<64 x i16> %a0)
  ret i16 %1
}

;
; vXi8
;

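; pminub exists since SSE2, so the i8 reductions need no sign-bit bias, just
; shuffles between pairwise mins. On SSE4.1+ the final 16 bytes reuse
; phminposuw: taking pminub of the vector with itself shifted right by 8 bits
; (psrlw $8) leaves each word's low byte holding the min of a byte pair and
; its high byte zero, so the minimum word equals the minimum byte.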
define i8 @test_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrlw $8, %xmm0
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $al killed $al killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrlw $8, %xmm1
; SSE41-NEXT:    pminub %xmm0, %xmm1
; SSE41-NEXT:    phminposuw %xmm1, %xmm0
; SSE41-NEXT:    pextrb $0, %xmm0, %eax
; SSE41-NEXT:    # kill: def $al killed $al killed $eax
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vphminposuw %xmm0, %xmm0
; AVX-NEXT:    vpextrb $0, %xmm0, %eax
; AVX-NEXT:    # kill: def $al killed $al killed $eax
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
; AVX512-NEXT:    # kill: def $al killed $al killed $eax
; AVX512-NEXT:    retq
  %1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v16i8(<16 x i8> %a0)
  ret i8 %1
}
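
; The wider i8 tests below follow the same pattern, first narrowing to one
; xmm register with pairwise pminub before the psrlw/pminub/phminposuw tail.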

define i8 @test_v32i8(<32 x i8> %a0) {
; SSE2-LABEL: test_v32i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrlw $8, %xmm0
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $al killed $al killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v32i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminub %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrlw $8, %xmm1
; SSE41-NEXT:    pminub %xmm0, %xmm1
; SSE41-NEXT:    phminposuw %xmm1, %xmm0
; SSE41-NEXT:    pextrb $0, %xmm0, %eax
; SSE41-NEXT:    # kill: def $al killed $al killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v32i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
; AVX1-NEXT:    # kill: def $al killed $al killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v32i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
; AVX2-NEXT:    # kill: def $al killed $al killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
; AVX512-NEXT:    # kill: def $al killed $al killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v32i8(<32 x i8> %a0)
  ret i8 %1
}

define i8 @test_v64i8(<64 x i8> %a0) {
; SSE2-LABEL: test_v64i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pminub %xmm3, %xmm1
; SSE2-NEXT:    pminub %xmm2, %xmm0
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrlw $8, %xmm0
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $al killed $al killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v64i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminub %xmm3, %xmm1
; SSE41-NEXT:    pminub %xmm2, %xmm0
; SSE41-NEXT:    pminub %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrlw $8, %xmm1
; SSE41-NEXT:    pminub %xmm0, %xmm1
; SSE41-NEXT:    phminposuw %xmm1, %xmm0
; SSE41-NEXT:    pextrb $0, %xmm0, %eax
; SSE41-NEXT:    # kill: def $al killed $al killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v64i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
; AVX1-NEXT:    # kill: def $al killed $al killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v64i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
; AVX2-NEXT:    # kill: def $al killed $al killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v64i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
; AVX512-NEXT:    # kill: def $al killed $al killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v64i8(<64 x i8> %a0)
  ret i8 %1
}

define i8 @test_v128i8(<128 x i8> %a0) {
; SSE2-LABEL: test_v128i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pminub %xmm6, %xmm2
; SSE2-NEXT:    pminub %xmm4, %xmm0
; SSE2-NEXT:    pminub %xmm2, %xmm0
; SSE2-NEXT:    pminub %xmm7, %xmm3
; SSE2-NEXT:    pminub %xmm5, %xmm1
; SSE2-NEXT:    pminub %xmm3, %xmm1
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrlw $8, %xmm1
; SSE2-NEXT:    pminub %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $al killed $al killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v128i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminub %xmm7, %xmm3
; SSE41-NEXT:    pminub %xmm5, %xmm1
; SSE41-NEXT:    pminub %xmm3, %xmm1
; SSE41-NEXT:    pminub %xmm6, %xmm2
; SSE41-NEXT:    pminub %xmm4, %xmm0
; SSE41-NEXT:    pminub %xmm2, %xmm0
; SSE41-NEXT:    pminub %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrlw $8, %xmm1
; SSE41-NEXT:    pminub %xmm0, %xmm1
; SSE41-NEXT:    phminposuw %xmm1, %xmm0
; SSE41-NEXT:    pextrb $0, %xmm0, %eax
; SSE41-NEXT:    # kill: def $al killed $al killed $eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v128i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT:    vpminub %xmm5, %xmm6, %xmm5
; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpminub %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpminub %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
; AVX1-NEXT:    # kill: def $al killed $al killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v128i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminub %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpminub %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
; AVX2-NEXT:    # kill: def $al killed $al killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v128i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminub %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
; AVX512-NEXT:    # kill: def $al killed $al killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v128i8(<128 x i8> %a0)
  ret i8 %1
}

declare i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64>)
declare i64 @llvm.experimental.vector.reduce.umin.i64.v4i64(<4 x i64>)
declare i64 @llvm.experimental.vector.reduce.umin.i64.v8i64(<8 x i64>)
declare i64 @llvm.experimental.vector.reduce.umin.i64.v16i64(<16 x i64>)

declare i32 @llvm.experimental.vector.reduce.umin.i32.v4i32(<4 x i32>)
declare i32 @llvm.experimental.vector.reduce.umin.i32.v8i32(<8 x i32>)
declare i32 @llvm.experimental.vector.reduce.umin.i32.v16i32(<16 x i32>)
declare i32 @llvm.experimental.vector.reduce.umin.i32.v32i32(<32 x i32>)

declare i16 @llvm.experimental.vector.reduce.umin.i16.v8i16(<8 x i16>)
declare i16 @llvm.experimental.vector.reduce.umin.i16.v16i16(<16 x i16>)
declare i16 @llvm.experimental.vector.reduce.umin.i16.v32i16(<32 x i16>)
declare i16 @llvm.experimental.vector.reduce.umin.i16.v64i16(<64 x i16>)

declare i8 @llvm.experimental.vector.reduce.umin.i8.v16i8(<16 x i8>)
declare i8 @llvm.experimental.vector.reduce.umin.i8.v32i8(<32 x i8>)
declare i8 @llvm.experimental.vector.reduce.umin.i8.v64i8(<64 x i8>)
declare i8 @llvm.experimental.vector.reduce.umin.i8.v128i8(<128 x i8>)
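
; The CHECK lines in this file are autogenerated; after a codegen change they
; can be refreshed by re-running utils/update_llc_test_checks.py on this test
; file.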