; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
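; Check that reversing vector shuffles are selected to the NEON VREV64, VREV32,
; and VREV16 instructions, which reverse the element order within 64-bit,
; 32-bit, and 16-bit groups respectively.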

define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8:
;CHECK: vrev64.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
	ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev64D16:
;CHECK: vrev64.16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
	ret <4 x i16> %tmp2
}

define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
;CHECK: test_vrev64D32:
;CHECK: vrev64.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
	ret <2 x i32> %tmp2
}

define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
;CHECK: test_vrev64Df:
;CHECK: vrev64.32
	%tmp1 = load <2 x float>* %A
	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
	ret <2 x float> %tmp2
}

define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev64Q8:
;CHECK: vrev64.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
	ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev64Q16:
;CHECK: vrev64.16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
	ret <8 x i16> %tmp2
}

define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
;CHECK: test_vrev64Q32:
;CHECK: vrev64.32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
	ret <4 x i32> %tmp2
}

define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
;CHECK: test_vrev64Qf:
;CHECK: vrev64.32
	%tmp1 = load <4 x float>* %A
	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
	ret <4 x float> %tmp2
}

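; VREV32 reverses the element order within each 32-bit word.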
define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev32D8:
;CHECK: vrev32.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
	ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev32D16:
;CHECK: vrev32.16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
	ret <4 x i16> %tmp2
}

define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev32Q8:
;CHECK: vrev32.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
	ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16:
;CHECK: vrev32.16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
	ret <8 x i16> %tmp2
}

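; VREV16 reverses the byte order within each 16-bit halfword.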
define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev16D8:
;CHECK: vrev16.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
	ret <8 x i8> %tmp2
}

define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev16Q8:
;CHECK: vrev16.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
	ret <16 x i8> %tmp2
}