; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s

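; Test that shift and count-leading-zeros operations on vector types are
; lowered to the MIPS MSA instruction set.

; shl should be selected as sll.[bhwd].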
define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
  ; CHECK: sll_v16i8:

  %1 = load <16 x i8>* %a
  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
  %2 = load <16 x i8>* %b
  ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
  %3 = shl <16 x i8> %1, %2
  ; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <16 x i8> %3, <16 x i8>* %c
  ; CHECK-DAG: st.b [[R3]], 0($4)

  ret void
  ; CHECK: .size sll_v16i8
}

define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
  ; CHECK: sll_v8i16:

  %1 = load <8 x i16>* %a
  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
  %2 = load <8 x i16>* %b
  ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
  %3 = shl <8 x i16> %1, %2
  ; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <8 x i16> %3, <8 x i16>* %c
  ; CHECK-DAG: st.h [[R3]], 0($4)

  ret void
  ; CHECK: .size sll_v8i16
}

define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
  ; CHECK: sll_v4i32:

  %1 = load <4 x i32>* %a
  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
  %2 = load <4 x i32>* %b
  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
  %3 = shl <4 x i32> %1, %2
  ; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <4 x i32> %3, <4 x i32>* %c
  ; CHECK-DAG: st.w [[R3]], 0($4)

  ret void
  ; CHECK: .size sll_v4i32
}

define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
  ; CHECK: sll_v2i64:

  %1 = load <2 x i64>* %a
  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
  %2 = load <2 x i64>* %b
  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
  %3 = shl <2 x i64> %1, %2
  ; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <2 x i64> %3, <2 x i64>* %c
  ; CHECK-DAG: st.d [[R3]], 0($4)

  ret void
  ; CHECK: .size sll_v2i64
}

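; ashr should be selected as sra.[bhwd].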
define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
  ; CHECK: sra_v16i8:

  %1 = load <16 x i8>* %a
  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
  %2 = load <16 x i8>* %b
  ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
  %3 = ashr <16 x i8> %1, %2
  ; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <16 x i8> %3, <16 x i8>* %c
  ; CHECK-DAG: st.b [[R3]], 0($4)

  ret void
  ; CHECK: .size sra_v16i8
}

define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
  ; CHECK: sra_v8i16:

  %1 = load <8 x i16>* %a
  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
  %2 = load <8 x i16>* %b
  ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
  %3 = ashr <8 x i16> %1, %2
  ; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <8 x i16> %3, <8 x i16>* %c
  ; CHECK-DAG: st.h [[R3]], 0($4)

  ret void
  ; CHECK: .size sra_v8i16
}

define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
  ; CHECK: sra_v4i32:

  %1 = load <4 x i32>* %a
  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
  %2 = load <4 x i32>* %b
  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
  %3 = ashr <4 x i32> %1, %2
  ; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <4 x i32> %3, <4 x i32>* %c
  ; CHECK-DAG: st.w [[R3]], 0($4)

  ret void
  ; CHECK: .size sra_v4i32
}

define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
  ; CHECK: sra_v2i64:

  %1 = load <2 x i64>* %a
  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
  %2 = load <2 x i64>* %b
  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
  %3 = ashr <2 x i64> %1, %2
  ; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <2 x i64> %3, <2 x i64>* %c
  ; CHECK-DAG: st.d [[R3]], 0($4)

  ret void
  ; CHECK: .size sra_v2i64
}

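; lshr should be selected as srl.[bhwd].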
define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
  ; CHECK: srl_v16i8:

  %1 = load <16 x i8>* %a
  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
  %2 = load <16 x i8>* %b
  ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
  %3 = lshr <16 x i8> %1, %2
  ; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <16 x i8> %3, <16 x i8>* %c
  ; CHECK-DAG: st.b [[R3]], 0($4)

  ret void
  ; CHECK: .size srl_v16i8
}

define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
  ; CHECK: srl_v8i16:

  %1 = load <8 x i16>* %a
  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
  %2 = load <8 x i16>* %b
  ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
  %3 = lshr <8 x i16> %1, %2
  ; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <8 x i16> %3, <8 x i16>* %c
  ; CHECK-DAG: st.h [[R3]], 0($4)

  ret void
  ; CHECK: .size srl_v8i16
}

define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
  ; CHECK: srl_v4i32:

  %1 = load <4 x i32>* %a
  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
  %2 = load <4 x i32>* %b
  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
  %3 = lshr <4 x i32> %1, %2
  ; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <4 x i32> %3, <4 x i32>* %c
  ; CHECK-DAG: st.w [[R3]], 0($4)

  ret void
  ; CHECK: .size srl_v4i32
}

define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
  ; CHECK: srl_v2i64:

  %1 = load <2 x i64>* %a
  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
  %2 = load <2 x i64>* %b
  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
  %3 = lshr <2 x i64> %1, %2
  ; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
  store <2 x i64> %3, <2 x i64>* %c
  ; CHECK-DAG: st.d [[R3]], 0($4)

  ret void
  ; CHECK: .size srl_v2i64
}

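; ctlz should be selected as nlzc.[bhwd].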
define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
  ; CHECK: ctlz_v16i8:

  %1 = load <16 x i8>* %a
  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
  %2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
  ; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
  store <16 x i8> %2, <16 x i8>* %c
  ; CHECK-DAG: st.b [[R3]], 0($4)

  ret void
  ; CHECK: .size ctlz_v16i8
}

define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
  ; CHECK: ctlz_v8i16:

  %1 = load <8 x i16>* %a
  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
  %2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
  ; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
  store <8 x i16> %2, <8 x i16>* %c
  ; CHECK-DAG: st.h [[R3]], 0($4)

  ret void
  ; CHECK: .size ctlz_v8i16
}

define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
  ; CHECK: ctlz_v4i32:

  %1 = load <4 x i32>* %a
  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
  %2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
  ; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
  store <4 x i32> %2, <4 x i32>* %c
  ; CHECK-DAG: st.w [[R3]], 0($4)

  ret void
  ; CHECK: .size ctlz_v4i32
}

define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
  ; CHECK: ctlz_v2i64:

  %1 = load <2 x i64>* %a
  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
  %2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
  ; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
  store <2 x i64> %2, <2 x i64>* %c
  ; CHECK-DAG: st.d [[R3]], 0($4)

  ret void
  ; CHECK: .size ctlz_v2i64
}

declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %val)
declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %val)
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val)
declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %val)