; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
;
; Combine tests involving SSE3/SSSE3 target shuffles (MOVDDUP, MOVSHDUP, MOVSLDUP, PSHUFB)
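;
; Each test below runs a PSHUFB byte shuffle through a float shufflevector whose
; mask matches MOVDDUP/MOVSHDUP/MOVSLDUP, giving shuffle combining a chance to
; fold the pair into a single target shuffle.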

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)

define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movddup:
; SSE: # BB#0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; SSE-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movddup:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movshdup:
; SSE: # BB#0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; SSE-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movshdup:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: retq
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movsldup:
; SSE: # BB#0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; SSE-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movsldup:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,1,1,1,1,3,3,3,3]
; AVX-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX-NEXT: retq
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x float> %4
}