; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64

; Four "varying" complex values in SoA form: two <4 x float> fields,
; presumably the real and imaginary lanes — NOTE(review): field meaning
; inferred from the name and the %re argument below; confirm against the
; originating (ispc-style) source.
%v4_varying_complex = type { <4 x float>, <4 x float> }

; Gather/scatter pattern from an FFT bit-reversal stage: load four i32
; indices, scale them by 16, use each as an element offset into %re to
; gather four scalar floats, then store them to four fixed byte offsets
; (128/164/200/236) inside %destination.  The CHECK lines below are
; autogenerated (see NOTE at the top of the file) and verify that the
; gather is scalarized into four extract+load pairs on both targets.
define void @FFT(%v4_varying_complex* noalias nocapture %destination, float* noalias %re, <4 x i32>* noalias nocapture %ptr_cast_for_load) nounwind {
; X86-LABEL: FFT:
; X86:       # %bb.0: # %begin
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqu (%edx), %xmm0
; X86-NEXT:    pslld $4, %xmm0
; X86-NEXT:    movd %xmm0, %edx
; X86-NEXT:    pextrd $1, %xmm0, %esi
; X86-NEXT:    pextrd $2, %xmm0, %edi
; X86-NEXT:    pextrd $3, %xmm0, %ebx
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X86-NEXT:    movss %xmm0, 128(%eax)
; X86-NEXT:    movss %xmm1, 164(%eax)
; X86-NEXT:    movss %xmm2, 200(%eax)
; X86-NEXT:    movss %xmm3, 236(%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: FFT:
; X64:       # %bb.0: # %begin
; X64-NEXT:    movdqu (%rdx), %xmm0
; X64-NEXT:    pslld $4, %xmm0
; X64-NEXT:    movq %xmm0, %rax
; X64-NEXT:    movslq %eax, %r8
; X64-NEXT:    sarq $32, %rax
; X64-NEXT:    pextrq $1, %xmm0, %rdx
; X64-NEXT:    movslq %edx, %rcx
; X64-NEXT:    sarq $32, %rdx
; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X64-NEXT:    movss %xmm0, 128(%rdi)
; X64-NEXT:    movss %xmm1, 164(%rdi)
; X64-NEXT:    movss %xmm2, 200(%rdi)
; X64-NEXT:    movss %xmm3, 236(%rdi)
; X64-NEXT:    retq
begin:
  ; Load the four i32 gather indices and scale each by 16 (shl 4).
  %ptr_masked_load79 = load <4 x i32>, <4 x i32>* %ptr_cast_for_load, align 4
  %mul__bitReversedProgramIndex_load = shl <4 x i32> %ptr_masked_load79, <i32 4, i32 4, i32 4, i32 4>

  ; Scalarize the index vector: extract each lane and sign-extend to i64.
  %offset32_1 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 0
  %ptroffset_1 = sext i32 %offset32_1 to i64
  %offset32_2 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 1
  %ptroffset_2 = sext i32 %offset32_2 to i64
  %offset32_3 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 2
  %ptroffset_3 = sext i32 %offset32_3 to i64
  %offset32_4 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 3
  %ptroffset_4 = sext i32 %offset32_4 to i64

  ; Gather: four scalar float loads from %re at the computed element offsets.
  %ptrcast_1 = getelementptr float, float* %re, i64 %ptroffset_1
  %val_1 = load float, float* %ptrcast_1, align 4
  %ptrcast_2 = getelementptr float, float* %re, i64 %ptroffset_2
  %val_2 = load float, float* %ptrcast_2, align 4
  %ptrcast_3 = getelementptr float, float* %re, i64 %ptroffset_3
  %val_3 = load float, float* %ptrcast_3, align 4
  %ptrcast_4 = getelementptr float, float* %re, i64 %ptroffset_4
  %val_4 = load float, float* %ptrcast_4, align 4

  ; Scatter to fixed byte offsets in %destination.  The first store is
  ; addressed via a typed GEP (element 4, field 0 == byte 128); the other
  ; three go through i8* arithmetic (164, 200, 236) — deliberately mixed
  ; addressing forms for the codegen test.
  %destination_load_ptr2int_2void = bitcast %v4_varying_complex* %destination to i8*
  %ptrcast1_1 = getelementptr inbounds %v4_varying_complex, %v4_varying_complex* %destination, i64 4, i32 0, i64 0
  store float %val_1, float* %ptrcast1_1, align 4
  %finalptr_2 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 164
  %ptrcast1_2 = bitcast i8* %finalptr_2 to float*
  store float %val_2, float* %ptrcast1_2, align 4
  %finalptr_3 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 200
  %ptrcast1_3 = bitcast i8* %finalptr_3 to float*
  store float %val_3, float* %ptrcast1_3, align 4
  %finalptr_4 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 236
  %ptrcast1_4 = bitcast i8* %finalptr_4 to float*
  store float %val_4, float* %ptrcast1_4, align 4
  ret void
}