// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
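// Checks the IR generated for the AltiVec vec_* and vec_v* intrinsics: most
// operations should lower to calls to @llvm.ppc.altivec.* intrinsics, while
// the simple arithmetic and logical ones lower to plain LLVM vector
// instructions (add, sub, and, shl, ...).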

int main ()
{
  // TODO: uncomment
/* vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; */
  vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
  vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
  // TODO: uncomment
/* vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; */
  vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
  vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
  // TODO: uncomment
/* vector bool int vbi = { 1, 0, 1, 0 }; */
  vector int vi = { -1, 2, -3, 4 };
  vector unsigned int vui = { 1, 2, 3, 4 };
  vector float vf = { -1.5, 2.5, -3.5, 4.5 };

  // TODO: uncomment
/* vector bool char res_vbc; */
  vector signed char res_vsc;
  vector unsigned char res_vuc;
  // TODO: uncomment
/* vector bool short res_vbs; */
  vector short res_vs;
  vector unsigned short res_vus;
  vector pixel res_vp;
  // TODO: uncomment
/* vector bool int res_vbi; */
  vector int res_vi;
  vector unsigned int res_vui;
  vector float res_vf;

  signed char param_sc;
  unsigned char param_uc;
  short param_s;
  unsigned short param_us;
  int param_i;
  unsigned int param_ui;
  float param_f;

  int res_i;

  /* vec_abs */
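  // AltiVec has no integer absolute-value instruction: vec_abs(x) is expanded
  // to vmax(x, 0 - x), and the float form just clears the IEEE sign bit,
  // which is why the checks below match a sub/vmax pair or an 'and' mask.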
  vsc = vec_abs(vsc); // CHECK: sub <16 x i8> zeroinitializer
                      // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abs(vs); // CHECK: sub <8 x i16> zeroinitializer
                    // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abs(vi); // CHECK: sub <4 x i32> zeroinitializer
                    // CHECK: @llvm.ppc.altivec.vmaxsw

  vf = vec_abs(vf); // CHECK: and <4 x i32>

  /* vec_abss */
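  // vec_abss uses a saturating subtract from zero, so the absolute value of
  // the most negative element saturates instead of wrapping around.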
  vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
                       // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs
                     // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws
                     // CHECK: @llvm.ppc.altivec.vmaxsw

  /* vec_add */
  res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
  res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16>
  res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
  res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32>
  res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
  res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
  res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
  res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16>
  res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
  res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32>
  res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
  res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>

  /* vec_addc */
  res_vui = vec_addc(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw
  res_vui = vec_vaddcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw

  /* vec_adds */
  res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws

  /* vec_and */
  res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
  res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
  res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
  res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
  res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
  res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>

  /* vec_andc */
  res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
                                // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
                                // CHECK: and <16 x i8>

  res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
                             // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
                                // CHECK: and <8 x i16>

  res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
                             // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
                                // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
                             // CHECK: and <4 x i32>

  res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
                                 // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
                                 // CHECK: and <16 x i8>

  res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
                              // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
                                 // CHECK: and <8 x i16>

  res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
                              // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
                              // CHECK: and <4 x i32>

  /* vec_avg */
  res_vsc = vec_avg(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
  res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
  res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
  res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw

  /* vec_ceil */
  res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
  res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip

  /* vec_cmpb */
  res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
  res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp

  /* vec_cmpeq */
  // TODO: uncomment
  /*res_vbc = */vec_cmpeq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb
  /*res_vbc = */vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
  /*res_vbs = */vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
  /*res_vbs = */vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
  /*res_vbi = */vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
  /*res_vbi = */vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
  /*res_vbi = */vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp

  /* vec_cmpge */
  // TODO: uncomment
  /*res_vbi = */vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
  /*res_vbi = */vec_vcmpgefp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp

  /* vec_cmpgt */
  // TODO: uncomment
  /*res_vbc = */vec_cmpgt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  /*res_vbc = */vec_cmpgt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  /*res_vbs = */vec_cmpgt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  /*res_vbs = */vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  /*res_vbi = */vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  /*res_vbi = */vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  /*res_vbi = */vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
  /*res_vbc = */vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  /*res_vbc = */vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  /*res_vbs = */vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  /*res_vbs = */vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  /*res_vbi = */vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  /*res_vbi = */vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  /*res_vbi = */vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_cmple */
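  // There are no dedicated le/lt instructions: vec_cmple(a, b) is emitted as
  // vec_cmpge(b, a) and vec_cmplt(a, b) as vec_cmpgt(b, a), so the checks in
  // these two sections still match the ge/gt intrinsics.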
  // TODO: uncomment
  /*res_vbi = */vec_cmple(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp

  /* vec_cmplt */
  // TODO: uncomment
  /*res_vbc = */vec_cmplt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  /*res_vbc = */vec_cmplt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  /*res_vbs = */vec_cmplt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  /*res_vbs = */vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  /*res_vbi = */vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  /*res_vbi = */vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  /*res_vbi = */vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_ctf */
  res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
  res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux

  /* vec_cts */
  res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
  res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs

  /* vec_ctu */
  res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
  res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs

  /* vec_dss */
  vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss

  /* vec_dssall */
  vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall

  /* vec_dst */
  vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst

  /* vec_dstst */
  vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst

  /* vec_dststt */
  vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt

  /* vec_dstt */
  vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt

  /* vec_expte */
  res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
  res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp

  /* vec_floor */
  res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
  res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim

  /* vec_ld */
  res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx

  /* vec_lde */
  res_vsc = vec_lde(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lde(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lde(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lde(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lde(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
  res_vsc = vec_lvebx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lvebx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lvehx(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lvehx(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lvewx(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx

  /* vec_ldl */
  res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl

  /* vec_loge */
  res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
  res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp

  /* vec_lvsl */
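  // vec_lvsl and vec_lvsr (below) return the permute control vectors that
  // vperm uses to realign data loaded from an unaligned address.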
  res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl

  /* vec_lvsr */
  res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr

  /* vec_madd */
  res_vf = vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
  res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp

  /* vec_madds */
  res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
  res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs

  /* vec_max */
  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
  res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp

  /* vec_mergeh */
  res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mfvscr */
  vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr

  /* vec_min */
  res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
  res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp

  /* vec_mladd */
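  // vec_mladd is a modulo (low-half) multiply-add, so it lowers to plain IR
  // mul and add rather than an AltiVec intrinsic.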
  res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16>
                                      // CHECK: add <8 x i16>

  res_vs = vec_mladd(vus, vs, vs); // CHECK: mul <8 x i16>
                                   // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vus, vus); // CHECK: mul <8 x i16>
                                    // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vs, vs); // CHECK: mul <8 x i16>
                                  // CHECK: add nsw <8 x i16>

  /* vec_mradds */
  res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
  res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs

  /* vec_msum */
  res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
  res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm

  /* vec_msums */
  res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
  res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs

  /* vec_mtvscr */
  vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr

  /* vec_mule */
  res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
  res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh

  /* vec_mulo */
  res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
  res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh

  /* vec_nmsub */
  res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
  res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp

  /* vec_nor */
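  // vec_nor(a, b) computes ~(a | b): an or followed by an xor with all-ones.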
  res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8>
                               // CHECK: xor <16 x i8>

  res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
                               // CHECK: xor <16 x i8>

  res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
                            // CHECK: xor <8 x i16>

  res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
                               // CHECK: xor <8 x i16>

  res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
                            // CHECK: xor <4 x i32>

  res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
                               // CHECK: xor <4 x i32>

  res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
                            // CHECK: xor <4 x i32>

  res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8>
                                // CHECK: xor <16 x i8>

  res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
                                // CHECK: xor <16 x i8>

  res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
                             // CHECK: xor <8 x i16>

  res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
                                // CHECK: xor <8 x i16>

  res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
                             // CHECK: xor <4 x i32>

  res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
                                // CHECK: xor <4 x i32>

  res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
                             // CHECK: xor <4 x i32>

  /* vec_or */
  res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
  res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>

  /* vec_pack */
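  // vec_pack narrows each element to half its width, keeping the low-order
  // bits; here it is lowered through vperm rather than the saturating vpk
  // instructions exercised by vec_packs/vec_packsu below.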
  res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_packpx */
  res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
  res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx

  /* vec_packs */
  res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_packsu */
  res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_perm */
  res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_re */
  res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
  res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp

  /* vec_rl */
  res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw

  /* vec_round */
  res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
  res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin

  /* vec_rsqrte */
  res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
  res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp

  /* vec_sel */
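  // vec_sel(a, b, c) is a bitwise select, (a & ~c) | (b & c), hence the
  // xor/and/and/or sequence checked for each type.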
  res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: or <16 x i8>

  res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: or <16 x i8>

  res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
                                 // CHECK: and <8 x i16>
                                 // CHECK: and <8 x i16>
                                 // CHECK: or <8 x i16>

  res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
                                    // CHECK: and <8 x i16>
                                    // CHECK: and <8 x i16>
                                    // CHECK: or <8 x i16>

  res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: or <4 x i32>

  res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
                                    // CHECK: and <4 x i32>
                                    // CHECK: and <4 x i32>
                                    // CHECK: or <4 x i32>

  res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: or <4 x i32>

  res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: or <16 x i8>

  res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: or <16 x i8>

  res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
                                  // CHECK: and <8 x i16>
                                  // CHECK: and <8 x i16>
                                  // CHECK: or <8 x i16>

  res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
                                     // CHECK: and <8 x i16>
                                     // CHECK: and <8 x i16>
                                     // CHECK: or <8 x i16>

  res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: or <4 x i32>

  res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
                                     // CHECK: and <4 x i32>
                                     // CHECK: and <4 x i32>
                                     // CHECK: or <4 x i32>

  res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: or <4 x i32>

  /* vec_sl */
  res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32>
  res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32>

  /* vec_sld */
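  // vec_sld shifts the concatenation of its two inputs left by a byte count;
  // it is implemented with vperm and a constant control vector.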
  res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_sll */
  res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl

  /* vec_slo */
  res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo

  /* vec_splat */
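  // vec_splat replicates one selected element across the whole vector, again
  // via vperm with a splat control vector.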
799 res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
800 res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
801 res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
802 res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
803 res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
804 res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
805 res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
806 res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
807 res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
808 res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
809 res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
810 res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
811 res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
812 res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
813
814 /* vec_splat_s8 */
815 res_vsc = vec_splat_s8(0x09); // TODO: add check
816 res_vsc = vec_vspltisb(0x09); // TODO: add check
817
818 /* vec_splat_s16 */
819 res_vs = vec_splat_s16(0x09); // TODO: add check
820 res_vs = vec_vspltish(0x09); // TODO: add check
821
822 /* vec_splat_s32 */
823 res_vi = vec_splat_s32(0x09); // TODO: add check
824 res_vi = vec_vspltisw(0x09); // TODO: add check
825
826 /* vec_splat_u8 */
827 res_vuc = vec_splat_u8(0x09); // TODO: add check
828
829 /* vec_splat_u16 */
830 res_vus = vec_splat_u16(0x09); // TODO: add check
831
832 /* vec_splat_u32 */
833 res_vui = vec_splat_u32(0x09); // TODO: add check
834
835 /* vec_sr */
836 res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8>
837 res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8>
838 res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16>
839 res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16>
840 res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32>
841 res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32>
842 res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8>
843 res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8>
844 res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16>
845 res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16>
846 res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32>
847 res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32>
848
849 /* vec_sra */
850 res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
851 res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
852 res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
853 res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
854 res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
855 res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
856 res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
857 res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
858 res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
859 res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
860 res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
861 res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
862
863 /* vec_srl */
864 res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
865 res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
866 res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
867 res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
868 res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
869 res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
870 res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
871 res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
872 res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
873 res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
874 res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
875 res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
876 res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
877 res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
878 res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
879 res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
880 res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
881 res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
882 res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
883 res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
884 res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
885 res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
886 res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
887 res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
888 res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
889 res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
890 res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
891 res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
892 res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
893 res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
894 res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
895 res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
896 res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
897 res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
898 res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
899 res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
900
901 /* vec_sro */
902 res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
903 res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
904 res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
905 res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
906 res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
907 res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
908 res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
909 res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
910 res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
911 res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
912 res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
913 res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
914 res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
915 res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
916 res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
917 res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
918 res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
919 res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
920 res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
921 res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
922 res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
923 res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
924 res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
925 res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
926 res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
927 res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
928 res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
929 res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
930
931 /* vec_st */
932 vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
933 vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
934 vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
935 vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
936 vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
937 vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
938 vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
939 vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
940 vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
941 vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
942 vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
943 vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
944 vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
945 vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
946 vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
947 vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
948 vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
949 vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
950 vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
951 vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
952 vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
953 vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
954 vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
955 vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
956 vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
957 vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
958 vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
959 vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
960
961 /* vec_ste */
962 vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
963 vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
964 vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
965 vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
966 vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
967 vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
968 vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
969 vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
970 vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
971 vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
972 vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
973 vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
974 vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
975 vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
976
977 /* vec_stl */
978 vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
979 vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
980 vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
981 vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
982 vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
983 vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
984 vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
985 vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
986 vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
987 vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
988 vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
989 vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
990 vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
991 vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
992 vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
993 vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
994 vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
995 vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
996 vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
997 vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
998 vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
999 vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
1000 vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
1001 vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
1002 vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
1003 vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
1004 vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
1005 vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
1006
1007 /* vec_sub */
1008 res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
1009 res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
1010 res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
1011 res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
1012 res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
1013 res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
1014 res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
1015 res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
1016 res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
1017 res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
1018 res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
1019 res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
1020 res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
1021 res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
1022
1023 /* vec_subc */
1024 res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
1025 res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
1026
1027 /* vec_subs */
1028 res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
1029 res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
1030 res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1031 res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1032 res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
1033 res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
1034 res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
1035 res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
1036 res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1037 res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1038 res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
1039 res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
1040
1041 /* vec_sum4s */
1042 res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
1043 res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
1044 res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
1045 res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
1046 res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
1047 res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
1048
1049 /* vec_sum2s */
1050 res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
1051 res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
1052
1053 /* vec_sums */
1054 res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
1055 res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
1056
1057 /* vec_trunc */
1058 res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
1059 res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
1060
1061 /* vec_unpackh */
  res_vs = vec_unpackh(vsc);                  // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_unpackh(vs);                   // CHECK: @llvm.ppc.altivec.vupkhsh
  res_vs = vec_vupkhsb(vsc);                  // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_vupkhsh(vs);                   // CHECK: @llvm.ppc.altivec.vupkhsh

  /* vec_unpackl */
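  /* As vec_unpackh, but widens the low half of the source elements. */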
  res_vs = vec_unpackl(vsc);                  // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_unpackl(vs);                   // CHECK: @llvm.ppc.altivec.vupklsh
  res_vs = vec_vupklsb(vsc);                  // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_vupklsh(vs);                   // CHECK: @llvm.ppc.altivec.vupklsh

  /* vec_xor */
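  /* Bitwise xor lowers to the IR xor instruction; float operands are
     bitcast to <4 x i32> first, which is why the vf cases also check for
     an integer xor. */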
  res_vsc = vec_xor(vsc, vsc);                // CHECK: xor <16 x i8>
  res_vuc = vec_xor(vuc, vuc);                // CHECK: xor <16 x i8>
  res_vs = vec_xor(vs, vs);                   // CHECK: xor <8 x i16>
  res_vus = vec_xor(vus, vus);                // CHECK: xor <8 x i16>
  res_vi = vec_xor(vi, vi);                   // CHECK: xor <4 x i32>
  res_vui = vec_xor(vui, vui);                // CHECK: xor <4 x i32>
  res_vf = vec_xor(vf, vf);                   // CHECK: xor <4 x i32>
  res_vsc = vec_vxor(vsc, vsc);               // CHECK: xor <16 x i8>
  res_vuc = vec_vxor(vuc, vuc);               // CHECK: xor <16 x i8>
  res_vs = vec_vxor(vs, vs);                  // CHECK: xor <8 x i16>
  res_vus = vec_vxor(vus, vus);               // CHECK: xor <8 x i16>
  res_vi = vec_vxor(vi, vi);                  // CHECK: xor <4 x i32>
  res_vui = vec_vxor(vui, vui);               // CHECK: xor <4 x i32>
  res_vf = vec_vxor(vf, vf);                  // CHECK: xor <4 x i32>

  /* ------------------------------ predicates -------------------------------------- */
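  /* The vec_all_* / vec_any_* predicates lower to the '.p' (dot) forms of
     the comparison intrinsics, which return an int derived from CR6 rather
     than a lane mask.  Integer ge/le/lt reuse the gt comparisons with the
     operands swapped and/or the CR6 condition adjusted, so their CHECK
     lines still name a vcmpgt intrinsic. */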

  /* vec_all_eq */
  res_i = vec_all_eq(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ge */
  res_i = vec_all_ge(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_ge(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_ge(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_ge(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_ge(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_ge(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_ge(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_gt */
  res_i = vec_all_gt(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_gt(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_gt(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_gt(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_gt(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_gt(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_gt(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_in */
  res_i = vec_all_in(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  /* vec_all_le */
  res_i = vec_all_le(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_le(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_le(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_le(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_le(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_le(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_le(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgefp.p

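  /* vec_all_lt */
  res_i = vec_all_lt(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_lt(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_lt(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_lt(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_lt(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_lt(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_lt(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
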
  /* vec_all_nan */
  res_i = vec_all_nan(vf);                    // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ne */
  res_i = vec_all_ne(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_nge */
  res_i = vec_all_nge(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_ngt */
  res_i = vec_all_ngt(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_nle */
  res_i = vec_all_nle(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nlt */
  res_i = vec_all_nlt(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_numeric */
  res_i = vec_all_numeric(vf);                // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_eq */
  res_i = vec_any_eq(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ge */
  res_i = vec_any_ge(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_ge(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_ge(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_ge(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_ge(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_ge(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_ge(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_gt */
  res_i = vec_any_gt(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_gt(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_gt(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_gt(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_gt(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_gt(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_gt(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_le */
  res_i = vec_any_le(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_le(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_le(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_le(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_le(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_le(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_le(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_lt */
  res_i = vec_any_lt(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_lt(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_lt(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_lt(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_lt(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_lt(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_lt(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nan */
  res_i = vec_any_nan(vf);                    // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ne */
  res_i = vec_any_ne(vsc, vsc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vuc, vuc);               // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vs, vs);                 // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vus, vus);               // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vi, vi);                 // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vui, vui);               // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_nge */
  res_i = vec_any_nge(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_ngt */
  res_i = vec_any_ngt(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nle */
  res_i = vec_any_nle(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_nlt */
  res_i = vec_any_nlt(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_numeric */
  res_i = vec_any_numeric(vf);                // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_out */
  res_i = vec_any_out(vf, vf);                // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  return 0;
}