// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s

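// This file exercises the AltiVec vector builtins from altivec.h: each
// builtin is invoked once per supported element type, and FileCheck verifies
// that it lowers either to plain vector IR (add, and, shl, ...) or to a call
// to the corresponding @llvm.ppc.altivec.* intrinsic.
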
// TODO: uncomment
/* vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; */
vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
// TODO: uncomment
/* vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; */
vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
// TODO: uncomment
/* vector bool int vbi = { 1, 0, 1, 0 }; */
vector int vi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };

// TODO: uncomment
/* vector bool char res_vbc; */
vector signed char res_vsc;
vector unsigned char res_vuc;
// TODO: uncomment
/* vector bool short res_vbs; */
vector short res_vs;
vector unsigned short res_vus;
vector pixel res_vp;
// TODO: uncomment
/* vector bool int res_vbi; */
vector int res_vi;
vector unsigned int res_vui;
vector float res_vf;

signed char param_sc;
unsigned char param_uc;
short param_s;
unsigned short param_us;
int param_i;
unsigned int param_ui;
float param_f;

int res_i;

int test1() {
// CHECK: define i32 @test1

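  /* vec_abs has no dedicated AltiVec instruction: the integer forms are
     expected to negate (subtract from zero) and take the signed maximum,
     while the float form just masks off the sign bits with an and. */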
  /* vec_abs */
  vsc = vec_abs(vsc); // CHECK: sub nsw <16 x i8> zeroinitializer
                      // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abs(vs); // CHECK: sub nsw <8 x i16> zeroinitializer
                    // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abs(vi); // CHECK: sub nsw <4 x i32> zeroinitializer
                    // CHECK: @llvm.ppc.altivec.vmaxsw

  vf = vec_abs(vf); // CHECK: and <4 x i32>

  /* vec_abss */
  vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
                       // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs
                     // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws
                     // CHECK: @llvm.ppc.altivec.vmaxsw

  /* vec_add */
  res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
  res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16>
  res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
  res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32>
  res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
  res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
  res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
  res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16>
  res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
  res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32>
  res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
  res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>

  /* vec_addc */
  res_vui = vec_addc(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw
  res_vui = vec_vaddcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw

  /* vec_adds */
  res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws

  /* vec_and */
  res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
  res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
  res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
  res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
  res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
  res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>

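  /* vec_andc(a, b) computes a & ~b, so each variant is expected to emit an
     xor against all-ones (the complement) followed by an and. */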
  /* vec_andc */
  res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
                                // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
                                // CHECK: and <16 x i8>

  res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
                             // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
                                // CHECK: and <8 x i16>

  res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
                             // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
                                // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
                             // CHECK: and <4 x i32>

  res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
                                 // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
                                 // CHECK: and <16 x i8>

  res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
                              // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
                                 // CHECK: and <8 x i16>

  res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
                              // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
                              // CHECK: and <4 x i32>
}

// CHECK: define i32 @test2
int test2() {
  /* vec_avg */
  res_vsc = vec_avg(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb
  res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
  res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
  res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw

  /* vec_ceil */
  res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
  res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip

  /* vec_cmpb */
  res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
  res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp

  /* vec_cmpeq */
  vsc = vec_cmpeq(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpequb
  vuc = vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
  vs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
  vus = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
  vi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
  vui = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
  vf = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp

  /* vec_cmpge */
  vf = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
  vf = vec_vcmpgefp(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp

}

// CHECK: define i32 @test5
int test5() {

  /* vec_cmpgt */
  vsc = vec_cmpgt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vuc = vec_cmpgt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmpgt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vus = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
  vsc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  vuc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  vs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  vus = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

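  /* vec_cmple has no instruction of its own: it is expected to reuse the
     greater-or-equal compare with its operands swapped, which is why the
     check below looks for vcmpgefp rather than a "vcmplefp". */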
  /* vec_cmple */
  vf = vec_cmple(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
}

// CHECK: define i32 @test6
int test6() {
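  /* Likewise, vec_cmplt swaps the operands of the corresponding
     greater-than compare, so the checks below look for vcmpgt*. */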
  /* vec_cmplt */
  vsc = vec_cmplt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vuc = vec_cmplt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmplt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vus = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_ctf */
  res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
  res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux

  /* vec_cts */
  res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
  res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs

  /* vec_ctu */
  res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
  res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs

  /* vec_dss */
  vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss

  /* vec_dssall */
  vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall

  /* vec_dst */
  vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst

  /* vec_dstst */
  vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst

  /* vec_dststt */
  vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt

  /* vec_dstt */
  vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt

  /* vec_expte */
  res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
  res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp

  /* vec_floor */
  res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
  res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim

  /* vec_ld */
  res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx

  /* vec_lde */
  res_vsc = vec_lde(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lde(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lde(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lde(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lde(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
  res_vsc = vec_lvebx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lvebx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lvehx(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lvehx(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lvewx(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx

  /* vec_ldl */
  res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl

  /* vec_loge */
  res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
  res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp

  /* vec_lvsl */
  res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl

  /* vec_lvsr */
  res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr

  /* vec_madd */
  res_vf = vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
  res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp

  /* vec_madds */
  res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
  res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs

  /* vec_max */
  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
  res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp

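  /* vec_mergeh/vec_mergel interleave the high or low halves of their two
     operands; both are implemented as generic permutes here, so every
     variant is expected to go through @llvm.ppc.altivec.vperm. */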
  /* vec_mergeh */
  res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mfvscr */
  vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr

  /* vec_min */
  res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
  res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp

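  /* vec_mladd is a modulo (non-saturating) multiply-add, so it expands to
     element-wise mul and add instructions instead of an intrinsic call. */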
  /* vec_mladd */
  res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16>
                                      // CHECK: add <8 x i16>

  res_vs = vec_mladd(vus, vs, vs); // CHECK: mul nsw <8 x i16>
                                   // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vus, vus); // CHECK: mul nsw <8 x i16>
                                    // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vs, vs); // CHECK: mul nsw <8 x i16>
                                  // CHECK: add nsw <8 x i16>

  /* vec_mradds */
  res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
  res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs

  /* vec_msum */
  res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
  res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm

  /* vec_msums */
  res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
  res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs

  /* vec_mtvscr */
  vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr

  /* vec_mule */
  res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
  res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh

  /* vec_mulo */
  res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
  res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh

  /* vec_nmsub */
  res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
  res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp

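  /* vec_nor computes ~(a | b), so each variant should emit an or followed
     by an xor against all-ones. */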
  /* vec_nor */
  res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8>
                               // CHECK: xor <16 x i8>

  res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
                               // CHECK: xor <16 x i8>

  res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
                            // CHECK: xor <8 x i16>

  res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
                               // CHECK: xor <8 x i16>

  res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
                            // CHECK: xor <4 x i32>

  res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
                               // CHECK: xor <4 x i32>

  res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
                            // CHECK: xor <4 x i32>

  res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8>
                                // CHECK: xor <16 x i8>

  res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
                                // CHECK: xor <16 x i8>

  res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
                             // CHECK: xor <8 x i16>

  res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
                                // CHECK: xor <8 x i16>

  res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
                             // CHECK: xor <4 x i32>

  res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
                                // CHECK: xor <4 x i32>

  res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
                             // CHECK: xor <4 x i32>

  /* vec_or */
  res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
  res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>

  /* vec_pack */
  res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_packpx */
  res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
  res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx

  /* vec_packs */
  res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_packsu */
  res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_perm */
  res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_re */
  res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
  res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp

  /* vec_rl */
  res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw

  /* vec_round */
  res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
  res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin

  /* vec_rsqrte */
  res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
  res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp

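  /* vec_sel(a, b, c) picks bits from b where the mask c is set and from a
     where it is clear, i.e. (a & ~c) | (b & c), which is why each variant
     expands to the xor/and/and/or sequence checked below. */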
  /* vec_sel */
  res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: or <16 x i8>

  res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: and <16 x i8>
                                    // CHECK: or <16 x i8>

  res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
                                 // CHECK: and <8 x i16>
                                 // CHECK: and <8 x i16>
                                 // CHECK: or <8 x i16>

  res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
                                    // CHECK: and <8 x i16>
                                    // CHECK: and <8 x i16>
                                    // CHECK: or <8 x i16>

  res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: or <4 x i32>

  res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
                                    // CHECK: and <4 x i32>
                                    // CHECK: and <4 x i32>
                                    // CHECK: or <4 x i32>

  res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: and <4 x i32>
                                 // CHECK: or <4 x i32>

  res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: or <16 x i8>

  res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: and <16 x i8>
                                     // CHECK: or <16 x i8>

  res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
                                  // CHECK: and <8 x i16>
                                  // CHECK: and <8 x i16>
                                  // CHECK: or <8 x i16>

  res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
                                     // CHECK: and <8 x i16>
                                     // CHECK: and <8 x i16>
                                     // CHECK: or <8 x i16>

  res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: or <4 x i32>

  res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
                                     // CHECK: and <4 x i32>
                                     // CHECK: and <4 x i32>
                                     // CHECK: or <4 x i32>

  res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: and <4 x i32>
                                  // CHECK: or <4 x i32>

  /* vec_sl */
  res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32>
  res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32>

  /* vec_sld */
  res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_sll */
  res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl

  /* vec_slo */
  res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo

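  /* vec_splat replicates a single element across the vector; like the
     merges above, it is implemented as a permute and should emit vperm. */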
805 /* vec_splat */
806 res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
807 res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
808 res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
809 res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
810 res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
811 res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
812 res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
813 res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
814 res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
815 res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
816 res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
817 res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
818 res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
819 res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
820
821 /* vec_splat_s8 */
822 res_vsc = vec_splat_s8(0x09); // TODO: add check
823 res_vsc = vec_vspltisb(0x09); // TODO: add check
824
825 /* vec_splat_s16 */
826 res_vs = vec_splat_s16(0x09); // TODO: add check
827 res_vs = vec_vspltish(0x09); // TODO: add check
828
829 /* vec_splat_s32 */
830 res_vi = vec_splat_s32(0x09); // TODO: add check
831 res_vi = vec_vspltisw(0x09); // TODO: add check
832
833 /* vec_splat_u8 */
834 res_vuc = vec_splat_u8(0x09); // TODO: add check
835
836 /* vec_splat_u16 */
837 res_vus = vec_splat_u16(0x09); // TODO: add check
838
839 /* vec_splat_u32 */
840 res_vui = vec_splat_u32(0x09); // TODO: add check
841
842 /* vec_sr */
843 res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8>
844 res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8>
845 res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16>
846 res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16>
847 res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32>
848 res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32>
849 res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8>
850 res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8>
851 res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16>
852 res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16>
853 res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32>
854 res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32>
855
856 /* vec_sra */
857 res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
858 res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
859 res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
860 res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
861 res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
862 res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
863 res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
864 res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
865 res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
866 res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
867 res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
868 res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
869
870 /* vec_srl */
871 res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
872 res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
873 res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
874 res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
875 res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
876 res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
877 res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
878 res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
879 res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
880 res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
881 res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
882 res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
883 res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
884 res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
885 res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
886 res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
887 res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
888 res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
889 res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
890 res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
891 res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
892 res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
893 res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
894 res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
895 res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
896 res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
897 res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
898 res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
899 res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
900 res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
901 res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
902 res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
903 res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
904 res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
905 res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
906 res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
907
908 /* vec_sro */
909 res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
910 res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
911 res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
912 res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
913 res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
914 res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
915 res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
916 res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
917 res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
918 res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
919 res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
920 res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
921 res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
922 res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
923 res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
924 res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
925 res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
926 res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
927 res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
928 res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
929 res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
930 res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
931 res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
932 res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
933 res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
934 res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
935 res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
936 res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
937
938 /* vec_st */
939 vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
940 vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
941 vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
942 vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
943 vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
944 vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
945 vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
946 vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
947 vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
948 vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
949 vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
950 vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
951 vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
952 vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
953 vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
954 vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
955 vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
956 vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
957 vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
958 vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
959 vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
960 vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
961 vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
962 vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
963 vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
964 vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
965 vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
966 vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
967
968 /* vec_ste */
969 vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
970 vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
971 vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
972 vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
973 vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
974 vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
975 vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
976 vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
977 vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
978 vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
979 vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
980 vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
981 vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
982 vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
983
984 /* vec_stl */
985 vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
986 vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
987 vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
988 vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
989 vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
990 vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
991 vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
992 vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
993 vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
994 vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
995 vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
996 vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
997 vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
998 vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
999 vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
1000 vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
1001 vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
1002 vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
1003 vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
1004 vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
1005 vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
1006 vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
1007 vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
1008 vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
1009 vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
1010 vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
1011 vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
1012 vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
1013
1014 /* vec_sub */
1015 res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
1016 res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
1017 res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
1018 res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
1019 res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
1020 res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
1021 res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
1022 res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
1023 res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
1024 res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
1025 res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
1026 res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
1027 res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
1028 res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
1029
1030 /* vec_subc */
1031 res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
1032 res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
1033
1034 /* vec_subs */
1035 res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
1036 res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
1037 res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1038 res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1039 res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
1040 res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
1041 res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
1042 res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
1043 res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1044 res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1045 res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
1046 res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws

  /* vec_sum4s */
  res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
  res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
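  // vec_sum4s folds the elements sharing each 32-bit word of the first
  // operand (four bytes, or two halfwords) into that word, adds the
  // corresponding word of the accumulator, and saturates. Sketch:
  /* res_vi = vec_sum4s(vsc, vi); // word 0 = vsc[0]+vsc[1]+vsc[2]+vsc[3]+vi[0], ... */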

  /* vec_sum2s */
  res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
  res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws

  /* vec_sums */
  res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
  res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
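  // vec_sum2s adds each pair of words of the first operand into the odd
  // elements of the accumulator, while vec_sums is the full saturated
  // reduction, leaving a0+a1+a2+a3+b3 in element 3. With the vi above:
  /* res_vi = vec_sums(vi, vi); // element 3 = (-1 + 2 + -3 + 4) + 4 = 6 */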

  /* vec_trunc */
  res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
  res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
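  // vrfiz rounds each element to an integral value toward zero, so with the
  // vf initializer above:
  /* res_vf = vec_trunc(vf); // { -1.5, 2.5, -3.5, 4.5 } -> { -1.0, 2.0, -3.0, 4.0 } */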

  /* vec_unpackh */
  res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
  res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh

  /* vec_unpackl */
  res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
  res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
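  // The unpack family sign-extends half of the input elements to the next
  // wider type: the vupkh* forms widen the high (low-indexed) half and the
  // vupkl* forms the low half, e.g. vec_unpackh(vsc) yields vsc[0..7] as
  // eight signed shorts and vec_unpackl(vsc) yields vsc[8..15].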

  /* vec_xor */
  res_vsc = vec_xor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
  res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>
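  // There is no float xor in the ISA, so the vf variants are bitcast to
  // <4 x i32> and back, which is why their CHECK lines are integer xors.
  // The same bitwise idiom can flip float signs (sketch, commented out):
  /* res_vf = vec_xor(vf, (vector float)(vector unsigned int)
     { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }); // negates each lane */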

  /* ------------------------------ predicates -------------------------------------- */

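  // Each predicate compiles to the '.p' form of an AltiVec comparison, which
  // returns a scalar i32 derived from the CR6 bits the compare sets: the
  // vec_all_* forms ask whether the comparison held in every element, the
  // vec_any_* forms whether it held in at least one. As the CHECK lines below
  // show, the le/ge predicates reuse the gt (integer) or gefp (float)
  // comparisons; only the CR6 predicate and operand order differ.
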
  /* vec_all_eq */
  res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ge */
  res_i = vec_all_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_gt */
  res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_in */
  res_i = vec_all_in(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
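  // vcmpbfp is a bounds check: element i passes when -b[i] <= a[i] <= b[i]
  // (b must be non-negative). vec_all_in asks that every element pass;
  // vec_any_out, at the end of this file, is its complement. With the vf
  // initializer above (sketch, commented out):
  /* res_i = vec_all_in(vf, (vector float){ 5.0f, 5.0f, 5.0f, 5.0f }); // 1 */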

  /* vec_all_le */
  res_i = vec_all_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nan */
  res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ne */
  res_i = vec_all_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_nge */
  res_i = vec_all_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_ngt */
  res_i = vec_all_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_nle */
  res_i = vec_all_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nlt */
  res_i = vec_all_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_numeric */
  res_i = vec_all_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
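  // The NaN tests lean on IEEE self-comparison: x == x fails only for NaN,
  // so vec_all_nan asks that vcmpeqfp find no element equal to itself, while
  // vec_all_numeric asks that it find every element equal to itself.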

  /* vec_any_eq */
  res_i = vec_any_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ge */
  res_i = vec_any_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_gt */
  res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_le */
  res_i = vec_any_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_lt */
  res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nan */
  res_i = vec_any_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ne */
  res_i = vec_any_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_nge */
  res_i = vec_any_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_ngt */
  res_i = vec_any_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nle */
  res_i = vec_any_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_nlt */
  res_i = vec_any_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_numeric */
  res_i = vec_any_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_out */
  res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  return 0;
}
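
// A hedged usage sketch, separate from the checked test above: the '.p'
// predicates return a scalar int and so can drive ordinary control flow.
// The function name and the clamping strategy are illustrative assumptions,
// not part of the original test.
vector float clamp_to_unit(vector float v) {
  vector float upper = {  1.0f,  1.0f,  1.0f,  1.0f };
  vector float lower = { -1.0f, -1.0f, -1.0f, -1.0f };
  if (vec_all_in(v, upper)) // vcmpbfp.p: every element already in [-1, +1]
    return v;
  return vec_max(vec_min(v, upper), lower); // vminfp/vmaxfp clamp the rest
}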