; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2

; Shuffle chain building <x[0], 0.0, x[2], a[2]>: the zero lane is expected
; to be materialized with xorps+blendps, and the element from %a inserted
; with a single insertps.
define <4 x float> @shuffle_v4f32_0z27(<4 x float> %x, <4 x float> %a) {
; SSE-LABEL: shuffle_v4f32_0z27:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z27:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; AVX-NEXT: retq
  %vecext = extractelement <4 x float> %x, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
  ; <x[0], 0, x[2], undef>
  %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
  ; <x[0], 0, x[2], a[2]>
  %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
  ret <4 x float> %vecinit5
}
; Insert/shuffle chain building <xyzw[0], 0.0, 0.0, abcd[0]>: both zero
; lanes should come from one xorps+blendps, then one insertps for abcd[0].
define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0zz4:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz4:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; AVX-NEXT: retq
  %vecext = extractelement <4 x float> %xyzw, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.000000e+00, i32 2
  ; <xyzw[0], 0, 0, abcd[0]>
  %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %abcd, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x float> %vecinit4
}
; Shuffle chain building <xyzw[0], 0.0, xyzw[2], abcd[0]>: same pattern as
; shuffle_v4f32_0z27 but inserting element 0 of the second source.
define <4 x float> @shuffle_v4f32_0z24(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0z24:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z24:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; AVX-NEXT: retq
  %vecext = extractelement <4 x float> %xyzw, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  ; <xyzw[0], 0, xyzw[2], undef>
  %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %xyzw, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
  ; <xyzw[0], 0, xyzw[2], abcd[0]>
  %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %abcd, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x float> %vecinit5
}
; Builds <a, 0.0, 0.0, a> from a scalar argument. Note SSE and AVX lower the
; scalar-into-zero-vector step differently (blendps vs vmovss) before the
; broadcast-style shuffle that duplicates %a into lane 3.
define <4 x float> @shuffle_v4f32_0zz0(float %a) {
; SSE-LABEL: shuffle_v4f32_0zz0:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,1,0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz0:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,0]
; AVX-NEXT: retq
  %vecinit = insertelement <4 x float> undef, float %a, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.000000e+00, i32 2
  %vecinit3 = insertelement <4 x float> %vecinit2, float %a, i32 3
  ret <4 x float> %vecinit3
}
; Builds <A[0], 0.0, B[2], 0.0>. Both zero lanes and the cross-vector insert
; should fold into a single insertps using its zero-mask, with no separate
; xorps/blendps needed.
define <4 x float> @shuffle_v4f32_0z6z(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: shuffle_v4f32_0z6z:
; SSE: # BB#0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z6z:
; AVX: # BB#0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; AVX-NEXT: retq
  %vecext = extractelement <4 x float> %A, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecext2 = extractelement <4 x float> %B, i32 2
  %vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
  %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
  ret <4 x float> %vecinit4
}
; Extracting lane 0 of an insertps result whose zero-mask clears lane 0
; (imm 21 = 0b00010101, zmask bits 0 and 2 set — per the test name z0z7) is
; known to be 0.0, so the whole computation should fold to an xorps zero.
define float @extract_zero_insertps_z0z7(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: extract_zero_insertps_z0z7:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: extract_zero_insertps_z0z7:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 21)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; Extracting lane 0 of an insertps result where imm 128 inserts an element of
; the loaded %a1 into lane 0: the vector load + insertps + extract should fold
; to a single scalar movss load from %p1 (which element of %a1 is selected is
; encoded in imm bits 7:6 — presumably element 1 per the test name 5123;
; NOTE(review): the raw imm8 decodes as source element 2 — confirm intent).
define float @extract_lane_insertps_5123(<4 x float> %a0, <4 x float> *%p1) {
; SSE-LABEL: extract_lane_insertps_5123:
; SSE: # BB#0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: extract_lane_insertps_5123:
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
  %a1 = load <4 x float>, <4 x float> *%p1
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 128)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; SSE4.1 insertps intrinsic: inserts one element of the second operand into
; the first, with a zero-mask, all controlled by the i8 immediate.
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone