; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
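; Check that the bitwise-select pattern (a & b) | (~a & c) is matched and
; lowered to the NEON VBSL instruction for each D- and Q-register vector type.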

define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK: v_bsli8:
;CHECK: vbsl
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = and <8 x i8> %tmp1, %tmp2
	%tmp5 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp6 = and <8 x i8> %tmp5, %tmp3
	%tmp7 = or <8 x i8> %tmp4, %tmp6
	ret <8 x i8> %tmp7
}

define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK: v_bsli16:
;CHECK: vbsl
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = and <4 x i16> %tmp1, %tmp2
	%tmp5 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
	%tmp6 = and <4 x i16> %tmp5, %tmp3
	%tmp7 = or <4 x i16> %tmp4, %tmp6
	ret <4 x i16> %tmp7
}

define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK: v_bsli32:
;CHECK: vbsl
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = and <2 x i32> %tmp1, %tmp2
	%tmp5 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
	%tmp6 = and <2 x i32> %tmp5, %tmp3
	%tmp7 = or <2 x i32> %tmp4, %tmp6
	ret <2 x i32> %tmp7
}

define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
;CHECK: v_bsli64:
;CHECK: vbsl
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = load <1 x i64>* %C
	%tmp4 = and <1 x i64> %tmp1, %tmp2
	%tmp5 = xor <1 x i64> %tmp1, < i64 -1 >
	%tmp6 = and <1 x i64> %tmp5, %tmp3
	%tmp7 = or <1 x i64> %tmp4, %tmp6
	ret <1 x i64> %tmp7
}

define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK: v_bslQi8:
;CHECK: vbsl
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
	%tmp4 = and <16 x i8> %tmp1, %tmp2
	%tmp5 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp6 = and <16 x i8> %tmp5, %tmp3
	%tmp7 = or <16 x i8> %tmp4, %tmp6
	ret <16 x i8> %tmp7
}

define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK: v_bslQi16:
;CHECK: vbsl
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = load <8 x i16>* %C
	%tmp4 = and <8 x i16> %tmp1, %tmp2
	%tmp5 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
	%tmp6 = and <8 x i16> %tmp5, %tmp3
	%tmp7 = or <8 x i16> %tmp4, %tmp6
	ret <8 x i16> %tmp7
}

define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK: v_bslQi32:
;CHECK: vbsl
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = load <4 x i32>* %C
	%tmp4 = and <4 x i32> %tmp1, %tmp2
	%tmp5 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
	%tmp6 = and <4 x i32> %tmp5, %tmp3
	%tmp7 = or <4 x i32> %tmp4, %tmp6
	ret <4 x i32> %tmp7
}

define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
;CHECK: v_bslQi64:
;CHECK: vbsl
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = load <2 x i64>* %C
	%tmp4 = and <2 x i64> %tmp1, %tmp2
	%tmp5 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
	%tmp6 = and <2 x i64> %tmp5, %tmp3
	%tmp7 = or <2 x i64> %tmp4, %tmp6
	ret <2 x i64> %tmp7
}
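; Calls to the llvm.arm.neon.vbsl intrinsic should likewise be selected
; directly to VBSL.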
define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp {
; CHECK: f1:
; CHECK: vbsl
  %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind
  ret <8 x i8> %vbsl.i
}

define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
; CHECK: f2:
; CHECK: vbsl
  %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind
  ret <4 x i16> %vbsl3.i
}

define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
; CHECK: f3:
; CHECK: vbsl
  %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind
  ret <2 x i32> %vbsl3.i
}

define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp {
; CHECK: g1:
; CHECK: vbsl
  %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind
  ret <16 x i8> %vbsl.i
}

define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp {
; CHECK: g2:
; CHECK: vbsl
  %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind
  ret <8 x i16> %vbsl3.i
}

define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
; CHECK: g3:
; CHECK: vbsl
  %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind
  ret <4 x i32> %vbsl3.i
}

declare <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone