// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
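// This test drives the AltiVec vec_* builtins through IR generation and uses
// FileCheck to verify what each one lowers to: either a plain vector IR
// operation (add, and, shl, ...) or a call to an @llvm.ppc.altivec.*
// intrinsic.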

vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 };
vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
vector pixel vp = { 1, 2, 3, 4, 5, 6, 7, 8 };
vector bool int vbi = { 1, 0, 1, 0 };
vector int vi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };

vector bool char res_vbc;
vector signed char res_vsc;
vector unsigned char res_vuc;
vector bool short res_vbs;
vector short res_vs;
vector unsigned short res_vus;
vector pixel res_vp;
vector bool int res_vbi;
vector int res_vi;
vector unsigned int res_vui;
vector float res_vf;

signed char param_sc;
unsigned char param_uc;
short param_s;
unsigned short param_us;
int param_i;
unsigned int param_ui;
float param_f;

int res_i;

// CHECK: define void @test1
void test1() {

  /* vec_abs */
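  // There is no single AltiVec abs instruction: vec_abs(x) is open-coded as
  // max(x, 0 - x), i.e. a sub from zeroinitializer followed by the signed
  // vmax intrinsic, which is what the paired CHECK lines below match. The
  // float variant instead clears the sign bits with an and mask.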
  vsc = vec_abs(vsc); // CHECK: sub nsw <16 x i8> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abs(vs); // CHECK: sub nsw <8 x i16> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abs(vi); // CHECK: sub nsw <4 x i32> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsw

  vf = vec_abs(vf); // CHECK: and <4 x i32>

  /* vec_abss */
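  // The saturating form swaps the plain sub for a saturating subtract
  // (vsubsbs and friends), so negating the most negative element saturates
  // instead of wrapping before the vmax.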
  vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs
  // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws
  // CHECK: @llvm.ppc.altivec.vmaxsw

  /* vec_add */
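  // Each overload also accepts a bool vector as either operand; the bool
  // argument is simply treated as the other operand's element type, so every
  // combination lowers to the same vector add (nsw for the signed element
  // types, fadd for float).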
  res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_add(vbc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_add(vsc, vbc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_add(vbc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_add(vuc, vbc); // CHECK: add <16 x i8>
  res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_add(vbs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_add(vs, vbs); // CHECK: add nsw <8 x i16>
  res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
  res_vus = vec_add(vbs, vus); // CHECK: add <8 x i16>
  res_vus = vec_add(vus, vbs); // CHECK: add <8 x i16>
  res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_add(vbi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_add(vi, vbi); // CHECK: add nsw <4 x i32>
  res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
  res_vui = vec_add(vbi, vui); // CHECK: add <4 x i32>
  res_vui = vec_add(vui, vbi); // CHECK: add <4 x i32>
  res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
  res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_vaddubm(vbc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_vaddubm(vsc, vbc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_vaddubm(vbc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_vaddubm(vuc, vbc); // CHECK: add <16 x i8>
  res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_vadduhm(vbs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_vadduhm(vs, vbs); // CHECK: add nsw <8 x i16>
  res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
  res_vus = vec_vadduhm(vbs, vus); // CHECK: add <8 x i16>
  res_vus = vec_vadduhm(vus, vbs); // CHECK: add <8 x i16>
  res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_vadduwm(vbi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_vadduwm(vi, vbi); // CHECK: add nsw <4 x i32>
  res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
  res_vui = vec_vadduwm(vbi, vui); // CHECK: add <4 x i32>
  res_vui = vec_vadduwm(vui, vbi); // CHECK: add <4 x i32>
  res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>

  /* vec_addc */
  res_vui = vec_addc(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw
  res_vui = vec_vaddcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw

  /* vec_adds */
  res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_adds(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_adds(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_adds(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_adds(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_adds(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_adds(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_adds(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_adds(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
  res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_vaddsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_vaddsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_vaddubs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_vaddubs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_vadduhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_vadduhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_vadduws(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_vadduws(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws

  /* vec_and */
  res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_and(vbc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_and(vsc, vbc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vbc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vbc); // CHECK: and <16 x i8>
  res_vbc = vec_and(vbc, vbc); // CHECK: and <16 x i8>
  res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
  res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16>
  res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
  res_vus = vec_and(vbs, vus); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vbs); // CHECK: and <8 x i16>
  res_vbs = vec_and(vbs, vbs); // CHECK: and <8 x i16>
  res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
  res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32>
  res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
  res_vui = vec_and(vbi, vui); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vbi); // CHECK: and <4 x i32>
  res_vbi = vec_and(vbi, vbi); // CHECK: and <4 x i32>
  res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_vand(vbc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_vand(vsc, vbc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vbc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vbc); // CHECK: and <16 x i8>
  res_vbc = vec_vand(vbc, vbc); // CHECK: and <16 x i8>
  res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
  res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16>
  res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
  res_vus = vec_vand(vbs, vus); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vbs); // CHECK: and <8 x i16>
  res_vbs = vec_vand(vbs, vbs); // CHECK: and <8 x i16>
  res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
  res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32>
  res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>
  res_vui = vec_vand(vbi, vui); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vbi); // CHECK: and <4 x i32>
  res_vbi = vec_vand(vbi, vbi); // CHECK: and <4 x i32>

  /* vec_andc */
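  // vec_andc(a, b) computes a & ~b. The complement is emitted as an xor with
  // an all-ones vector, so every call is checked for an xor followed by an
  // and of the right width.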
  res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_andc(vbc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_andc(vsc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vbc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vbc = vec_andc(vbc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vbs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vbs = vec_andc(vbs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vbi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_vandc(vbc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_vandc(vsc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vbc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vbc = vec_vandc(vbc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vbs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vbs = vec_vandc(vbs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vbi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
}

// CHECK: define i32 @test2
int test2() {
  /* vec_avg */
  res_vsc = vec_avg(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb
  res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
  res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
  res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw

  /* vec_ceil */
  res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
  res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip

  /* vec_cmpb */
  res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
  res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp

  /* vec_cmpeq */
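  // Equality compares share one intrinsic per element width (vcmpequb/h/w),
  // since signed and unsigned equality are the same bit pattern; float uses
  // vcmpeqfp.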
  vsc = vec_cmpeq(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpequb
  vuc = vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
  vs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
  vs = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
  vi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
  vui = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
  vf = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp

  /* vec_cmpge */
  vf = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
  vf = vec_vcmpgefp(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp

}

// CHECK: define i32 @test5
int test5() {

  /* vec_cmpgt */
  vsc = vec_cmpgt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vuc = vec_cmpgt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmpgt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vus = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
  vsc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  vuc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  vs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  vus = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_cmple */
  vf = vec_cmple(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
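  // Note that no vcmple* instruction exists: as the check above shows,
  // vec_cmple(a, b) reuses vcmpgefp with the operands reversed.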
}

// CHECK: define i32 @test6
int test6() {
  /* vec_cmplt */
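  // Likewise vec_cmplt(a, b) is lowered as vcmpgt with the operands swapped,
  // which is why the checks below match the gt intrinsics.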
  vsc = vec_cmplt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vsc = vec_cmplt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmplt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_ctf */
  res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
  res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux

  /* vec_cts */
  res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
  res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs

  /* vec_ctu */
  res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
  res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs

  /* vec_dss */
  vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss

  /* vec_dssall */
  vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall

  /* vec_dst */
  vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst

  /* vec_dstst */
  vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst

  /* vec_dststt */
  vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt

  /* vec_dstt */
  vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt

  /* vec_expte */
  res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
  res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp

  /* vec_floor */
  res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
  res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim

  /* vec_ld */
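  // vec_ld and vec_lvx are synonyms: every overload, whatever the element
  // type of the result or the pointee, lowers to the same
  // llvm.ppc.altivec.lvx call.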
  res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx

  /* vec_lde */
  res_vsc = vec_lde(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lde(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lde(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lde(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lde(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
  res_vsc = vec_lvebx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lvebx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lvehx(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lvehx(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lvewx(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx

  /* vec_ldl */
  res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl

  /* vec_loge */
  res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
  res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp

  /* vec_lvsl */
  res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl

  /* vec_lvsr */
  res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr

  /* vec_madd */
  res_vf = vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
  res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp

  /* vec_madds */
  res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
  res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs

  /* vec_max */
  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
  res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp

  /* vec_mergeh */
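  // The merge-high/merge-low builtins have dedicated vmrgh/vmrgl hardware
  // instructions, but Clang emits them as vperm calls with a fixed control
  // vector, so every variant below checks for @llvm.ppc.altivec.vperm.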
  res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_mergeh(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_mergeh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_mergeh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_mergeh(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_vmrghb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_vmrghh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_vmrghh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_vmrghw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_mergel(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_mergel(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_mergel(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_mergel(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_vmrglb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_vmrglh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_vmrglh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_vmrglw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mfvscr */
  vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr

  /* vec_min */
  res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
  res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp

  /* vec_mladd */
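  // vec_mladd is a modular multiply-add, so no intrinsic is needed: the IR
  // is a plain mul followed by an add, marked nsw whenever a signed short
  // operand makes the result signed.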
  res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16>
  // CHECK: add <8 x i16>

  res_vs = vec_mladd(vus, vs, vs); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vus, vus); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vs, vs); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  /* vec_mradds */
  res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
  res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs

  /* vec_msum */
  res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
  res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm

  /* vec_msums */
  res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
  res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs

  /* vec_mtvscr */
  vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr

  /* vec_mule */
  res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
  res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh

  /* vec_mulo */
  res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
  res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh

  /* vec_nmsub */
  res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
  res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp

678 /* vec_nor */
  res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  /* vec_or */
  res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
  res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>

  /* vec_pack */
  res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_pack(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_pack(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_vpkuhum(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_vpkuwum(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_packpx */
  res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
  res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx

  /* vec_packs */
  res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_packsu */
  res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_perm */
  res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_perm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_perm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_perm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_perm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_vperm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_vperm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_vperm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_vperm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_re */
  res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
  res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp

  /* vec_rl */
  res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw

  /* vec_round */
  res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
  res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin

  /* vec_rsqrte */
  res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
  res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp

  /* vec_sel */
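  // vec_sel(a, b, m) is a bitwise select, (a & ~m) | (b & m): the IR pattern
  // is an xor to complement the mask, two ands, and an or, which is what the
  // four CHECK lines per call match.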
  res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  /* vec_sl */
  res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32>
  res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32>

  /* vec_sld */
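  // vec_sld concatenates its two inputs and shifts left by a whole number of
  // bytes; like the merges and splats it is modeled as a vperm whose control
  // vector encodes the byte offset.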
  res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_sld(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_vsldoi(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm

935 /* vec_sll */
  res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
  res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl

  /* vec_slo */
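  /* vec_slo shifts left by whole octets instead of bits; the count is
     taken from bits 121:124 of the second operand, which is why both
     signed and unsigned char shift vectors are accepted below. */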
  res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
  res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo

  /* vec_splat */
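  /* vec_splat replicates the selected element across the result; like
     vec_sld it is currently emitted as a vperm with a constant control
     vector rather than a dedicated vsplt instruction. */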
  res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_splat(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_splat(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_splat(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_splat(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbc = vec_vspltb(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbs = vec_vsplth(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vp = vec_vsplth(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vbi = vec_vspltw(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_splat_s8 */
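  /* The vec_splat_[su]{8,16,32} forms splat a 5-bit signed literal via
     vspltis[bhw]; no IR is checked for them yet (see the TODOs below). */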
  res_vsc = vec_splat_s8(0x09); // TODO: add check
  res_vsc = vec_vspltisb(0x09); // TODO: add check

  /* vec_splat_s16 */
  res_vs = vec_splat_s16(0x09); // TODO: add check
  res_vs = vec_vspltish(0x09); // TODO: add check

  /* vec_splat_s32 */
  res_vi = vec_splat_s32(0x09); // TODO: add check
  res_vi = vec_vspltisw(0x09); // TODO: add check

  /* vec_splat_u8 */
  res_vuc = vec_splat_u8(0x09); // TODO: add check

  /* vec_splat_u16 */
  res_vus = vec_splat_u16(0x09); // TODO: add check

  /* vec_splat_u32 */
  res_vui = vec_splat_u32(0x09); // TODO: add check

  /* vec_sr */
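  /* vec_sr is an element-wise shift right. The bare "shr" pattern is
     deliberate: FileCheck matches substrings, so it accepts either the
     lshr or ashr the frontend emits for unsigned and signed elements. */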
  res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8>
  res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8>
  res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16>
  res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16>
  res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32>
  res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32>
  res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8>
  res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8>
  res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16>
  res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16>
  res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32>
  res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32>

  /* vec_sra */
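  /* vec_sra is the algebraic (sign-propagating) shift right; it maps to
     the vsra[bhw] intrinsics even for unsigned element types. */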
  res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw

  /* vec_srl */
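  /* vec_srl is the right-hand counterpart of vec_sll: a whole-vector
     shift of 0-7 bits taken from the low bits of the second operand. */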
  res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr

  /* vec_sro */
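  /* vec_sro shifts the whole vector right by octets, mirroring vec_slo. */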
  res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro

  /* vec_st */
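  /* vec_st performs a 16-byte store via stvx; the effective address is
     truncated to a 16-byte boundary, so the low four bits are ignored. */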
  vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx

  /* vec_ste */
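  /* vec_ste stores a single element, selected by the effective address,
     through stvebx/stvehx/stvewx according to the element width. */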
  vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
  vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
  vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
  vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
  vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
  vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
  vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx

  /* vec_stl */
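  /* vec_stl behaves like vec_st but uses stvxl, which marks the touched
     cache block least-recently-used as a transient-data hint. */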
  vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl

  /* vec_sub */
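  /* vec_sub is modulo (wrapping) subtraction: signed forms carry the nsw
     flag, unsigned forms are plain sub, and the float form is fsub. */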
  res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
  res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
  res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
  res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
  res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
  res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
  res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
  res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
  res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
  res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
  res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
  res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
  res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
  res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>

  /* vec_subc */
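  /* vec_subc yields only the carry-out of an unsigned 32-bit subtract:
     each result word is 1 when no borrow occurs and 0 otherwise. */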
  res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
  res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw

  /* vec_subs */
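  /* vec_subs saturates instead of wrapping, clamping each difference to
     the limits of the element type. */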
  res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
  res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
  res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
  res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
  res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
  res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
  res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
  res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
  res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
  res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws

  /* vec_sum4s */
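  /* vec_sum4s adds groups of four bytes (or two halfwords) into the
     corresponding word of the second operand, saturating the result. */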
  res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
  res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs

  /* vec_sum2s */
  res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
  res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws

  /* vec_sums */
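  /* vec_sums produces the saturated sum of all four words of the first
     operand plus word 3 of the second, leaving the result in word 3. */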
  res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
  res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws

  /* vec_trunc */
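  /* vec_trunc rounds each float toward zero (vrfiz). */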
  res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
  res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz

  /* vec_unpackh */
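  /* vec_unpackh/vec_unpackl sign-extend the high and low half of each
     input vector into elements twice as wide. */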
  res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
  res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh

  /* vec_unpackl */
  res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
  res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh

  /* vec_xor */
  res_vsc = vec_xor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
  res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>

  /* ------------------------------ predicates -------------------------------------- */
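  /* The predicate forms return an int and compile to the .p variants of
     the comparison intrinsics, which read the result out of CR6. Note
     that ge/le (and their negations) are built from the greater-than
     compare with swapped operands or an inverted CR6 condition, except
     for floats, which have a dedicated vcmpgefp. */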

  /* vec_all_eq */
  res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ge */
  res_i = vec_all_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_gt */
  res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_in */
  res_i = vec_all_in(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  /* vec_all_le */
  res_i = vec_all_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nan */
  res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ne */
  res_i = vec_all_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_nge */
  res_i = vec_all_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_ngt */
  res_i = vec_all_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_nle */
  res_i = vec_all_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nlt */
  res_i = vec_all_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_numeric */
  res_i = vec_all_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_eq */
  res_i = vec_any_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ge */
  res_i = vec_any_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_gt */
  res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_le */
  res_i = vec_any_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_lt */
  res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nan */
  res_i = vec_any_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ne */
  res_i = vec_any_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_nge */
  res_i = vec_any_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_ngt */
  res_i = vec_any_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nle */
  res_i = vec_any_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_nlt */
  res_i = vec_any_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_numeric */
  res_i = vec_any_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_out */
  res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
}