; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s -check-prefix=X32
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s -check-prefix=X64

; Test for a case where insertps folded the load of the insertion element, but
; a later optimization then manipulated the folded load.
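; (Annotation, inferred from the IR and CHECK lines below: the scalar at byte
; offset 4 of %v1 is read twice, once folded straight into insertps as a
; memory operand and once via movss to feed the addps. The checks verify that
; both uses survive as separate loads rather than one shared load that a
; later pass could rewrite.)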

define <4 x float> @insertps_unfold(<4 x float>* %v0, <4 x float>* %v1) {
; X32-LABEL: insertps_unfold:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    movaps (%eax), %xmm0
; X32-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT:    addps %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: insertps_unfold:
; X64:       # BB#0:
; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    movaps (%rdi), %xmm0
; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT:    addps %xmm1, %xmm0
; X64-NEXT:    retq
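  ; Annotation: %b loads element 1 of %v1, and the shufflevector pulls the
  ; same element (index 5 of the concatenated <%e, %d> pair) out of the full
  ; vector load of %v1, so one memory location feeds both the insertps lane
  ; and, through %c, the fadd.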
  %a = getelementptr inbounds <4 x float>, <4 x float>* %v1, i64 0, i64 1
  %b = load float, float* %a, align 4
  %c = insertelement <4 x float> undef, float %b, i32 0
  %d = load <4 x float>, <4 x float>* %v1, align 16
  %e = load <4 x float>, <4 x float>* %v0, align 16
  %f = shufflevector <4 x float> %e, <4 x float> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 5>
  %g = fadd <4 x float> %c, %f
  ret <4 x float> %g
}
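
; Note: the X32/X64 CHECK lines above appear to follow the format produced by
; utils/update_llc_test_checks.py; if codegen changes, rerunning that script
; on this file should regenerate them.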