// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s

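// Checks that each AltiVec vec_* builtin lowers to the expected LLVM IR:
// either a plain vector instruction (add, and, shl, ...) or a call to the
// matching @llvm.ppc.altivec.* intrinsic.
// Naming: v* are vector inputs, res_v* receive builtin results, and param_*
// supply scalar operands.
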
vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 };
vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
vector bool int vbi = { 1, 0, 1, 0 };
vector int vi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };

vector bool char res_vbc;
vector signed char res_vsc;
vector unsigned char res_vuc;
vector bool short res_vbs;
vector short res_vs;
vector unsigned short res_vus;
vector pixel res_vp;
vector bool int res_vbi;
vector int res_vi;
vector unsigned int res_vui;
vector float res_vf;

signed char param_sc;
unsigned char param_uc;
short param_s;
unsigned short param_us;
int param_i;
unsigned int param_ui;
float param_f;

int res_i;

// CHECK: define void @test1
void test1() {

  /* vec_abs */
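  // There is no AltiVec absolute-value instruction: vec_abs(x) is emitted as
  // a subtract from zero followed by a signed max, hence the sub+vmaxs* pair.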
  vsc = vec_abs(vsc); // CHECK: sub nsw <16 x i8> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abs(vs); // CHECK: sub nsw <8 x i16> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abs(vi); // CHECK: sub nsw <4 x i32> zeroinitializer
  // CHECK: @llvm.ppc.altivec.vmaxsw

  vf = vec_abs(vf); // CHECK: and <4 x i32>

  /* vec_abss */
  vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  // CHECK: @llvm.ppc.altivec.vmaxsb

  vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs
  // CHECK: @llvm.ppc.altivec.vmaxsh

  vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws
  // CHECK: @llvm.ppc.altivec.vmaxsw

  /* vec_add */
  res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_add(vbc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_add(vsc, vbc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_add(vbc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_add(vuc, vbc); // CHECK: add <16 x i8>
  res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_add(vbs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_add(vs, vbs); // CHECK: add nsw <8 x i16>
  res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
  res_vus = vec_add(vbs, vus); // CHECK: add <8 x i16>
  res_vus = vec_add(vus, vbs); // CHECK: add <8 x i16>
  res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_add(vbi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_add(vi, vbi); // CHECK: add nsw <4 x i32>
  res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
  res_vui = vec_add(vbi, vui); // CHECK: add <4 x i32>
  res_vui = vec_add(vui, vbi); // CHECK: add <4 x i32>
  res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
  res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_vaddubm(vbc, vsc); // CHECK: add nsw <16 x i8>
  res_vsc = vec_vaddubm(vsc, vbc); // CHECK: add nsw <16 x i8>
  res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_vaddubm(vbc, vuc); // CHECK: add <16 x i8>
  res_vuc = vec_vaddubm(vuc, vbc); // CHECK: add <16 x i8>
  res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_vadduhm(vbs, vs); // CHECK: add nsw <8 x i16>
  res_vs = vec_vadduhm(vs, vbs); // CHECK: add nsw <8 x i16>
  res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
  res_vus = vec_vadduhm(vbs, vus); // CHECK: add <8 x i16>
  res_vus = vec_vadduhm(vus, vbs); // CHECK: add <8 x i16>
  res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_vadduwm(vbi, vi); // CHECK: add nsw <4 x i32>
  res_vi = vec_vadduwm(vi, vbi); // CHECK: add nsw <4 x i32>
  res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
  res_vui = vec_vadduwm(vbi, vui); // CHECK: add <4 x i32>
  res_vui = vec_vadduwm(vui, vbi); // CHECK: add <4 x i32>
  res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>

  /* vec_addc */
  res_vui = vec_addc(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw
  res_vui = vec_vaddcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vaddcuw

  /* vec_adds */
  res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_adds(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_adds(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_adds(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_adds(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_adds(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_adds(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_adds(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_adds(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
  res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_vaddsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vsc = vec_vaddsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
  res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_vaddubs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vuc = vec_vaddubs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
  res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
  res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_vadduhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vus = vec_vadduhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
  res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
  res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_vadduws(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
  res_vui = vec_vadduws(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws

  /* vec_and */
  res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_and(vbc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_and(vsc, vbc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vbc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_and(vuc, vbc); // CHECK: and <16 x i8>
  res_vbc = vec_and(vbc, vbc); // CHECK: and <16 x i8>
  res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
  res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16>
  res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
  res_vus = vec_and(vbs, vus); // CHECK: and <8 x i16>
  res_vus = vec_and(vus, vbs); // CHECK: and <8 x i16>
  res_vbs = vec_and(vbs, vbs); // CHECK: and <8 x i16>
  res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
  res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32>
  res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
  res_vui = vec_and(vbi, vui); // CHECK: and <4 x i32>
  res_vui = vec_and(vui, vbi); // CHECK: and <4 x i32>
  res_vbi = vec_and(vbi, vbi); // CHECK: and <4 x i32>
  res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_vand(vbc, vsc); // CHECK: and <16 x i8>
  res_vsc = vec_vand(vsc, vbc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vbc, vuc); // CHECK: and <16 x i8>
  res_vuc = vec_vand(vuc, vbc); // CHECK: and <16 x i8>
  res_vbc = vec_vand(vbc, vbc); // CHECK: and <16 x i8>
  res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
  res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16>
  res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
  res_vus = vec_vand(vbs, vus); // CHECK: and <8 x i16>
  res_vus = vec_vand(vus, vbs); // CHECK: and <8 x i16>
  res_vbs = vec_vand(vbs, vbs); // CHECK: and <8 x i16>
  res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
  res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32>
  res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>
  res_vui = vec_vand(vbi, vui); // CHECK: and <4 x i32>
  res_vui = vec_vand(vui, vbi); // CHECK: and <4 x i32>
  res_vbi = vec_vand(vbi, vbi); // CHECK: and <4 x i32>

  /* vec_andc */
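  // vec_andc(a, b) computes a & ~b: the complement is formed with an xor
  // against all-ones, then and'ed, so each call produces an xor+and pair.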
  res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_andc(vbc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_andc(vsc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vbc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_andc(vuc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vbc = vec_andc(vbc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vbs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_andc(vus, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vbs = vec_andc(vbs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vbi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_andc(vui, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_vandc(vbc, vsc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vsc = vec_vandc(vsc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vbc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vuc = vec_vandc(vuc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vbc = vec_vandc(vbc, vbc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>

  res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vbs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vus = vec_vandc(vus, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vbs = vec_vandc(vbs, vbs); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>

  res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vbi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vui = vec_vandc(vui, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>

  res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
}

// CHECK: define i32 @test2
int test2() {
  /* vec_avg */
  res_vsc = vec_avg(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb
  res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
  res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
  res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
  res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
  res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
  res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
  res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw

  /* vec_ceil */
  res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
  res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip

  /* vec_cmpb */
  res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
  res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp

  /* vec_cmpeq */
  vsc = vec_cmpeq(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpequb
  vuc = vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
  vs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
  vs = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
  vi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
  vui = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
  vf = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp

  /* vec_cmpge */
  vf = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
  vf = vec_vcmpgefp(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp

}

// CHECK: define i32 @test5
int test5() {

  /* vec_cmpgt */
  vsc = vec_cmpgt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vuc = vec_cmpgt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmpgt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vus = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
  vsc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
  vuc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
  vs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
  vus = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_cmple */
  vf = vec_cmple(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
}

// CHECK: define i32 @test6
int test6() {
  /* vec_cmplt */
  vsc = vec_cmplt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
  vsc = vec_cmplt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
  vs = vec_cmplt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
  vs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
  vi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
  vui = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
  vf = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp

  /* vec_ctf */
  res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
  res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx
  res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux

  /* vec_cts */
  res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
  res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs

  /* vec_ctu */
  res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
  res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs

  /* vec_dss */
  vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss

  /* vec_dssall */
  vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall

  /* vec_dst */
  vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst

  /* vec_dstst */
  vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst

  /* vec_dststt */
  vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt

  /* vec_dstt */
  vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt

  /* vec_expte */
  res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
  res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp

  /* vec_floor */
  res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
  res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim

  /* vec_ld */
  res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
  res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
  res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
  res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
  res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
  res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
  res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
  res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx

  /* vec_lde */
  res_vsc = vec_lde(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lde(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lde(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lde(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lde(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
  res_vsc = vec_lvebx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vuc = vec_lvebx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx
  res_vs = vec_lvehx(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx
  res_vus = vec_lvehx(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx
  res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx
  res_vui = vec_lvewx(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx
  res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx

  /* vec_ldl */
  res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
  res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
  res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
  res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
  res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl

  /* vec_loge */
  res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
  res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp

  /* vec_lvsl */
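  // lvsl produces the permute-control vector used to realign an unaligned
  // load; lvsr is its right-shift counterpart.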
  res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl

  /* vec_lvsr */
  res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr

  /* vec_madd */
  res_vf = vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
  res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp

  /* vec_madds */
  res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
  res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs

  /* vec_max */
  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
  res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
  res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
  res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
  res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
  res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
  res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
  res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp

  /* vec_mergeh */
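  // As the CHECK lines below show, the merge builtins are not lowered to the
  // specialized vmrg* intrinsics; Clang emits a generic
  // @llvm.ppc.altivec.vperm with a constant control vector instead.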
  res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_mfvscr */
  vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr

  /* vec_min */
  res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
  res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
  res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
  res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
  res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
  res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
  res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
  res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp

  /* vec_mladd */
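  // vec_mladd multiplies and adds element-wise; it needs no intrinsic and is
  // emitted as plain mul+add IR (nsw when a signed operand is involved).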
  res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16>
  // CHECK: add <8 x i16>

  res_vs = vec_mladd(vus, vs, vs); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vus, vus); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  res_vs = vec_mladd(vs, vs, vs); // CHECK: mul nsw <8 x i16>
  // CHECK: add nsw <8 x i16>

  /* vec_mradds */
  res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
  res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs

  /* vec_msum */
  res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
  res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
  res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
  res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
  res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm

  /* vec_msums */
  res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
  res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
  res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs

  /* vec_mtvscr */
  vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr

  /* vec_mule */
  res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
  res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
  res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
  res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
  res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh

  /* vec_mulo */
  res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
  res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
  res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
  res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
  res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh

  /* vec_nmsub */
  res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
  res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp

  /* vec_nor */
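  // vec_nor(a, b) = ~(a | b): an or followed by an xor with all-ones, so
  // each call below matches an or+xor pair.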
  res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
  // CHECK: xor <16 x i8>

  res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
  // CHECK: xor <8 x i16>

  res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
  // CHECK: xor <4 x i32>

  /* vec_or */
  res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
  res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
  res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
  res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
  res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
  res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
  res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
  res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>

  /* vec_pack */
  res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_packpx */
  res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
  res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx

  /* vec_packs */
  res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
  res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
  res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_packsu */
  res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
  res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
  res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
  res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
  res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus

  /* vec_perm */
  res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_re */
  res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
  res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp

  /* vec_rl */
  res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
  res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
  res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
  res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw

  /* vec_round */
  res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
  res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin

  /* vec_rsqrte */
  res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
  res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp

  /* vec_sel */
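  // vec_sel(a, b, m) = (a & ~m) | (b & m): the mask is complemented with an
  // xor, each input is and'ed with one half of the mask, and the two halves
  // are or'ed, giving the xor/and/and/or sequence checked below.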
  res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: and <16 x i8>
  // CHECK: or <16 x i8>

  res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: and <8 x i16>
  // CHECK: or <8 x i16>

  res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: and <4 x i32>
  // CHECK: or <4 x i32>

  /* vec_sl */
  res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32>
  res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8>
  res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8>
  res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16>
  res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16>
  res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32>
  res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32>

  /* vec_sld */
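  // vec_sld concatenates its operands and extracts a byte-shifted window;
  // like the merges above, it is emitted as a vperm with a constant mask.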
  res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
  res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm

  /* vec_sll */
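  // vec_sll shifts the whole vector left by bits; any unsigned element type
  // is accepted for the shift operand, and every combination lowers to the
  // same @llvm.ppc.altivec.vsl call.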
905 res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
906 res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
907 res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
908 res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
909 res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
910 res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
911 res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
912 res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
913 res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
914 res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
915 res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
916 res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
917 res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
918 res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
919 res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
920 res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
921 res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
922 res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
923 res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
924 res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
925 res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
926 res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
927 res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
928 res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
929 res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
930 res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
931 res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
932 res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
933 res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
934 res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
935 res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
936 res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
937 res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
938 res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
939 res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
940 res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
941
942 /* vec_slo */
943 res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
944 res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
945 res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
946 res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
947 res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
948 res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
949 res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
950 res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
951 res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
952 res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
953 res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
954 res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
955 res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
956 res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
957 res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
958 res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
959 res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
960 res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
961 res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
962 res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
963 res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
964 res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
965 res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
966 res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
967 res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
968 res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
969 res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
970 res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
971
972 /* vec_splat */
973 res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
974 res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
975 res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
976 res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
977 res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
978 res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
979 res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
980 res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
981 res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
982 res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
983 res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
984 res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
985 res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
986 res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
987
988 /* vec_splat_s8 */
989 res_vsc = vec_splat_s8(0x09); // TODO: add check
990 res_vsc = vec_vspltisb(0x09); // TODO: add check
991
992 /* vec_splat_s16 */
993 res_vs = vec_splat_s16(0x09); // TODO: add check
994 res_vs = vec_vspltish(0x09); // TODO: add check
995
996 /* vec_splat_s32 */
997 res_vi = vec_splat_s32(0x09); // TODO: add check
998 res_vi = vec_vspltisw(0x09); // TODO: add check
999
1000 /* vec_splat_u8 */
1001 res_vuc = vec_splat_u8(0x09); // TODO: add check
1002
1003 /* vec_splat_u16 */
1004 res_vus = vec_splat_u16(0x09); // TODO: add check
1005
1006 /* vec_splat_u32 */
1007 res_vui = vec_splat_u32(0x09); // TODO: add check
1008
1009 /* vec_sr */
1010 res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8>
1011 res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8>
1012 res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16>
1013 res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16>
1014 res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32>
1015 res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32>
1016 res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8>
1017 res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8>
1018 res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16>
1019 res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16>
1020 res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32>
1021 res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32>
1022
1023 /* vec_sra */
  res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
  res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
  res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
  res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw

  /* vec_srl */
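  // Shifts the entire 128-bit register right by a bit count taken from the
  // low-order bits of the second operand, so every element type maps to the
  // single vsr intrinsic.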
  res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
  res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr

  /* vec_sro */
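  // Shift right by octets: the byte count lives in bits 121:124 of the
  // second operand, and all element types funnel into vsro.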
  res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
  res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro

  /* vec_st */
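  // 16-byte-aligned vector store: stvx truncates the effective address to a
  // 16-byte boundary before storing.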
  vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
  vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
  vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx

  /* vec_ste */
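  // Stores a single element, selected by the effective address, through the
  // byte/halfword/word variants stvebx, stvehx, and stvewx.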
  vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
  vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
  vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
  vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
  vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
  vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
  vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
  vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
  vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx

  /* vec_stl */
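  // Same store as vec_st, but stvxl marks the touched cache line as least
  // recently used so it is preferentially evicted.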
  vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
  vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl

  /* vec_sub */
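  // Modular subtraction needs no intrinsic: integer elements lower to IR sub
  // (nsw on the signed variants) and floats to fsub.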
  res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
  res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
  res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
  res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
  res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
  res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
  res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
  res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
  res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
  res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
  res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
  res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
  res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
  res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>

  /* vec_subc */
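  // vsubcuw produces the carry-out bit (0 or 1) of each 32-bit unsigned
  // subtraction rather than the difference itself.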
  res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
  res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw

  /* vec_subs */
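  // Saturating subtraction clamps to the element type's range instead of
  // wrapping, via the vsub*s intrinsics.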
  res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
  res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
  res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
  res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
  res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
  res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
  res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
  res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
  res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
  res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
  res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws

  /* vec_sum4s */
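  // Sums each group of four bytes (or two halfwords) of the first operand
  // together with the corresponding word of the second, saturating the
  // 32-bit results.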
  res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
  res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
  res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
  res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs

  /* vec_sum2s */
  res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
  res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws

  /* vec_sums */
  res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
  res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws

  /* vec_trunc */
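  // vrfiz rounds each float toward zero to an integral value.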
  res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
  res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz

  /* vec_unpackh */
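  // Sign-extends the elements in the high half of the source vector to the
  // next wider element type; vec_unpackl below handles the low half.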
  res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
  res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
  res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh

  /* vec_unpackl */
  res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
  res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
  res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh

  /* vec_xor */
  res_vsc = vec_xor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
  res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8>
  res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8>
  res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16>
  res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16>
  res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32>
  res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32>
  res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>

  /* ------------------------------ predicates -------------------------------------- */
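  // The all_/any_ predicates return an int computed by the dot-form (.p)
  // variants of the comparison intrinsics rather than a vector mask.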

  /* vec_all_eq */
  res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ge */
  res_i = vec_all_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_gt */
  res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_in */
  res_i = vec_all_in(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  /* vec_all_le */
  res_i = vec_all_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_all_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nan */
  res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_ne */
  res_i = vec_all_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_all_nge */
  res_i = vec_all_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_ngt */
  res_i = vec_all_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_nle */
  res_i = vec_all_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_all_nlt */
  res_i = vec_all_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_all_numeric */
  res_i = vec_all_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_eq */
  res_i = vec_any_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ge */
  res_i = vec_any_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_gt */
  res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_le */
  res_i = vec_any_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_lt */
  res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
  res_i = vec_any_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
  res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
  res_i = vec_any_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
  res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
  res_i = vec_any_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
  res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nan */
  res_i = vec_any_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_ne */
  res_i = vec_any_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
  res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
  res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
  res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_nge */
  res_i = vec_any_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_ngt */
  res_i = vec_any_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_nle */
  res_i = vec_any_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p

  /* vec_any_nlt */
  res_i = vec_any_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p

  /* vec_any_numeric */
  res_i = vec_any_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p

  /* vec_any_out */
  res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p

  return 0;
}