; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
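; Each function below computes (A & 3) | (C & ~3). Because the two constant
; masks are bitwise complements, the ARM backend is expected to fold the
; VAND/VORR pair into a single NEON VBSL (bitwise select) instruction,
; which the CHECK lines verify.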

define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK: v_bsli8:
;CHECK: vldr.64
;CHECK: vldr.64
;CHECK: vbsl
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = and <8 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
	%tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
	%tmp7 = or <8 x i8> %tmp4, %tmp6
	ret <8 x i8> %tmp7
}

define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK: v_bsli16:
;CHECK: vldr.64
;CHECK: vldr.64
;CHECK: vbsl
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = and <4 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3>
	%tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
	%tmp7 = or <4 x i16> %tmp4, %tmp6
	ret <4 x i16> %tmp7
}

define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK: v_bsli32:
;CHECK: vldr.64
;CHECK: vldr.64
;CHECK: vbsl
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = and <2 x i32> %tmp1, <i32 3, i32 3>
	%tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
	%tmp7 = or <2 x i32> %tmp4, %tmp6
	ret <2 x i32> %tmp7
}

define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
;CHECK: v_bsli64:
;CHECK: vldr.64
;CHECK: vldr.64
;CHECK: vldr.64
;CHECK: vbsl
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = load <1 x i64>* %C
	%tmp4 = and <1 x i64> %tmp1, <i64 3>
	%tmp6 = and <1 x i64> %tmp3, <i64 -4>
	%tmp7 = or <1 x i64> %tmp4, %tmp6
	ret <1 x i64> %tmp7
}

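; The remaining variants operate on 128-bit vectors, which live in Q registers;
; their CHECK lines accordingly expect vldmia loads instead of vldr.64.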
define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK: v_bslQi8:
;CHECK: vldmia
;CHECK: vldmia
;CHECK: vbsl
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
	%tmp4 = and <16 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
	%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
	%tmp7 = or <16 x i8> %tmp4, %tmp6
	ret <16 x i8> %tmp7
}

define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK: v_bslQi16:
;CHECK: vldmia
;CHECK: vldmia
;CHECK: vbsl
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = load <8 x i16>* %C
	%tmp4 = and <8 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
	%tmp6 = and <8 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4>
	%tmp7 = or <8 x i16> %tmp4, %tmp6
	ret <8 x i16> %tmp7
}

define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK: v_bslQi32:
;CHECK: vldmia
;CHECK: vldmia
;CHECK: vbsl
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = load <4 x i32>* %C
	%tmp4 = and <4 x i32> %tmp1, <i32 3, i32 3, i32 3, i32 3>
	%tmp6 = and <4 x i32> %tmp3, <i32 -4, i32 -4, i32 -4, i32 -4>
	%tmp7 = or <4 x i32> %tmp4, %tmp6
	ret <4 x i32> %tmp7
}

define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
;CHECK: v_bslQi64:
;CHECK: vldmia
;CHECK: vldmia
;CHECK: vldmia
;CHECK: vbsl
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = load <2 x i64>* %C
	%tmp4 = and <2 x i64> %tmp1, <i64 3, i64 3>
	%tmp6 = and <2 x i64> %tmp3, <i64 -4, i64 -4>
	%tmp7 = or <2 x i64> %tmp4, %tmp6
	ret <2 x i64> %tmp7
}