// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
// RUN: -O -fzvector -fno-lax-vector-conversions \
// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
// RUN: -O -fzvector -fno-lax-vector-conversions \
// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
9#include <vecintrin.h>
10
11volatile vector signed char vsc;
12volatile vector signed short vss;
13volatile vector signed int vsi;
14volatile vector signed long long vsl;
15volatile vector unsigned char vuc;
16volatile vector unsigned short vus;
17volatile vector unsigned int vui;
18volatile vector unsigned long long vul;
19volatile vector bool char vbc;
20volatile vector bool short vbs;
21volatile vector bool int vbi;
22volatile vector bool long long vbl;
23volatile vector float vf;
24volatile vector double vd;
25
26volatile signed char sc;
27volatile signed short ss;
28volatile signed int si;
29volatile signed long long sl;
30volatile unsigned char uc;
31volatile unsigned short us;
32volatile unsigned int ui;
33volatile unsigned long long ul;
34volatile float f;
35volatile double d;
36
37const void * volatile cptr;
38const signed char * volatile cptrsc;
39const signed short * volatile cptrss;
40const signed int * volatile cptrsi;
41const signed long long * volatile cptrsl;
42const unsigned char * volatile cptruc;
43const unsigned short * volatile cptrus;
44const unsigned int * volatile cptrui;
45const unsigned long long * volatile cptrul;
46const float * volatile cptrf;
47const double * volatile cptrd;
48
49void * volatile ptr;
50signed char * volatile ptrsc;
51signed short * volatile ptrss;
52signed int * volatile ptrsi;
53signed long long * volatile ptrsl;
54unsigned char * volatile ptruc;
55unsigned short * volatile ptrus;
56unsigned int * volatile ptrui;
57unsigned long long * volatile ptrul;
58float * volatile ptrf;
59double * volatile ptrd;
60
61volatile unsigned int len;
62volatile int idx;
63int cc;
64
65void test_core(void) {
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +000066 // CHECK-ASM-LABEL: test_core
67 vector float vf2;
68 vector double vd2;
69
70 f = vec_extract(vf, 0);
71 // CHECK: extractelement <4 x float> %{{.*}}, i32 0
72 // CHECK-ASM: vstef
Ulrich Weigand6af25592017-07-17 17:47:35 +000073 f = vec_extract(vf, idx);
74 // CHECK: extractelement <4 x float> %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +000075 // CHECK-ASM: vlgvf
76 d = vec_extract(vd, 0);
77 // CHECK: extractelement <2 x double> %{{.*}}, i32 0
78 // CHECK-ASM: vsteg
Ulrich Weigand6af25592017-07-17 17:47:35 +000079 d = vec_extract(vd, idx);
80 // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +000081 // CHECK-ASM: vlgvg
Ulrich Weigand6af25592017-07-17 17:47:35 +000082
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +000083 vf2 = vf;
84 vf = vec_insert(f, vf2, 0);
85 // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 0
86 // CHECK-ASM: vlef
87 vf = vec_insert(0.0f, vf, 1);
88 // CHECK: insertelement <4 x float> %{{.*}}, float 0.000000e+00, i32 1
89 // CHECK-ASM: vleif %{{.*}}, 0, 1
Ulrich Weigand22ca9c62018-12-20 13:09:09 +000090 vf = vec_insert(f, vf, idx);
Ulrich Weigand6af25592017-07-17 17:47:35 +000091 // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +000092 // CHECK-ASM: vlvgf
93 vd2 = vd;
94 vd = vec_insert(d, vd2, 0);
95 // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 0
96 // CHECK-ASM: vleg
97 vd = vec_insert(0.0, vd, 1);
98 // CHECK: insertelement <2 x double> %{{.*}}, double 0.000000e+00, i32 1
99 // CHECK-ASM: vleig %{{.*}}, 0, 1
Ulrich Weigand22ca9c62018-12-20 13:09:09 +0000100 vd = vec_insert(d, vd, idx);
Ulrich Weigand6af25592017-07-17 17:47:35 +0000101 // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000102 // CHECK-ASM: vlvgg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000103
104 vf = vec_promote(f, idx);
105 // CHECK: insertelement <4 x float> undef, float %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000106 // CHECK-ASM: vlvgf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000107 vd = vec_promote(d, idx);
108 // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000109 // CHECK-ASM: vlvgg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000110
111 vf = vec_insert_and_zero(cptrf);
Ulrich Weigand22ca9c62018-12-20 13:09:09 +0000112 // CHECK: insertelement <4 x float> <float 0.000000e+00, float undef, float 0.000000e+00, float 0.000000e+00>, float %{{.*}}, i32 1
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000113 // CHECK-ASM: vllezf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000114 vd = vec_insert_and_zero(cptrd);
115 // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000116 // CHECK-ASM: vllezg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000117
118 vf = vec_perm(vf, vf, vuc);
119 // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000120 // CHECK-ASM: vperm
Ulrich Weigand6af25592017-07-17 17:47:35 +0000121 vd = vec_perm(vd, vd, vuc);
122 // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000123 // CHECK-ASM: vperm
Ulrich Weigand6af25592017-07-17 17:47:35 +0000124
125 vul = vec_bperm_u128(vuc, vuc);
126 // CHECK: call <2 x i64> @llvm.s390.vbperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000127 // CHECK-ASM: vbperm
Ulrich Weigand6af25592017-07-17 17:47:35 +0000128
129 vf = vec_sel(vf, vf, vui);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000130 // CHECK-ASM: vsel
Ulrich Weigand6af25592017-07-17 17:47:35 +0000131 vf = vec_sel(vf, vf, vbi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000132 // CHECK-ASM: vsel
Ulrich Weigand6af25592017-07-17 17:47:35 +0000133 vd = vec_sel(vd, vd, vul);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000134 // CHECK-ASM: vsel
Ulrich Weigand6af25592017-07-17 17:47:35 +0000135 vd = vec_sel(vd, vd, vbl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000136 // CHECK-ASM: vsel
Ulrich Weigand6af25592017-07-17 17:47:35 +0000137
138 vf = vec_gather_element(vf, vui, cptrf, 0);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000139 // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000140 vf = vec_gather_element(vf, vui, cptrf, 1);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000141 // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 1
Ulrich Weigand6af25592017-07-17 17:47:35 +0000142 vf = vec_gather_element(vf, vui, cptrf, 2);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000143 // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 2
Ulrich Weigand6af25592017-07-17 17:47:35 +0000144 vf = vec_gather_element(vf, vui, cptrf, 3);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000145 // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 3
Ulrich Weigand6af25592017-07-17 17:47:35 +0000146 vd = vec_gather_element(vd, vul, cptrd, 0);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000147 // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000148 vd = vec_gather_element(vd, vul, cptrd, 1);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000149 // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
Ulrich Weigand6af25592017-07-17 17:47:35 +0000150
151 vec_scatter_element(vf, vui, ptrf, 0);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000152 // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000153 vec_scatter_element(vf, vui, ptrf, 1);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000154 // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 1
Ulrich Weigand6af25592017-07-17 17:47:35 +0000155 vec_scatter_element(vf, vui, ptrf, 2);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000156 // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 2
Ulrich Weigand6af25592017-07-17 17:47:35 +0000157 vec_scatter_element(vf, vui, ptrf, 3);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000158 // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 3
Ulrich Weigand6af25592017-07-17 17:47:35 +0000159 vec_scatter_element(vd, vul, ptrd, 0);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000160 // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000161 vec_scatter_element(vd, vul, ptrd, 1);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000162 // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
Ulrich Weigand6af25592017-07-17 17:47:35 +0000163
164 vf = vec_xl(idx, cptrf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000165 // CHECK-ASM: vl
Ulrich Weigand6af25592017-07-17 17:47:35 +0000166 vd = vec_xl(idx, cptrd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000167 // CHECK-ASM: vl
Ulrich Weigand6af25592017-07-17 17:47:35 +0000168
169 vec_xst(vf, idx, ptrf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000170 // CHECK-ASM: vst
Ulrich Weigand6af25592017-07-17 17:47:35 +0000171 vec_xst(vd, idx, ptrd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000172 // CHECK-ASM: vst
Ulrich Weigand6af25592017-07-17 17:47:35 +0000173
174 vd = vec_load_bndry(cptrd, 64);
175 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000176 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000177 vf = vec_load_bndry(cptrf, 64);
178 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000179 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000180 vf = vec_load_bndry(cptrf, 128);
181 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000182 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000183 vf = vec_load_bndry(cptrf, 256);
184 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000185 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000186 vf = vec_load_bndry(cptrf, 512);
187 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000188 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000189 vf = vec_load_bndry(cptrf, 1024);
190 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000191 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000192 vf = vec_load_bndry(cptrf, 2048);
193 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000194 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000195 vf = vec_load_bndry(cptrf, 4096);
196 // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000197 // CHECK-ASM: vlbb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000198
199 vf = vec_load_len(cptrf, idx);
200 // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000201 // CHECK-ASM: vll
Ulrich Weigand6af25592017-07-17 17:47:35 +0000202 vd = vec_load_len(cptrd, idx);
203 // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000204 // CHECK-ASM: vll
Ulrich Weigand6af25592017-07-17 17:47:35 +0000205
206 vec_store_len(vf, ptrf, idx);
207 // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000208 // CHECK-ASM: vstl
Ulrich Weigand6af25592017-07-17 17:47:35 +0000209 vec_store_len(vd, ptrd, idx);
210 // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000211 // CHECK-ASM: vstl
Ulrich Weigand6af25592017-07-17 17:47:35 +0000212
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000213 vuc = vec_load_len_r(cptruc, 0);
214 // CHECK: call <16 x i8> @llvm.s390.vlrl(i32 0, i8* %{{.*}})
215 // CHECK-ASM: vlrl %{{.*}}, 0(%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000216 vuc = vec_load_len_r(cptruc, idx);
217 // CHECK: call <16 x i8> @llvm.s390.vlrl(i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000218 // CHECK-ASM: vlrlr
Ulrich Weigand6af25592017-07-17 17:47:35 +0000219
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000220 vec_store_len_r(vuc, ptruc, 0);
221 // CHECK: call void @llvm.s390.vstrl(<16 x i8> %{{.*}}, i32 0, i8* %{{.*}})
222 // CHECK-ASM: vstrl %{{.*}}, 0(%{{.*}}), 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000223 vec_store_len_r(vuc, ptruc, idx);
224 // CHECK: call void @llvm.s390.vstrl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000225 // CHECK-ASM: vstrlr
Ulrich Weigand6af25592017-07-17 17:47:35 +0000226
227 vf = vec_splat(vf, 0);
228 // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000229 // CHECK-ASM: vrepf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000230 vf = vec_splat(vf, 1);
231 // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000232 // CHECK-ASM: vrepf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000233 vd = vec_splat(vd, 0);
234 // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000235 // CHECK-ASM: vrepg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000236 vd = vec_splat(vd, 1);
237 // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000238 // CHECK-ASM: vrepg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000239
240 vf = vec_splats(f);
241 // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000242 // CHECK-ASM: vlrepf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000243 vd = vec_splats(d);
244 // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000245 // CHECK-ASM: vlrepg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000246
247 vf = vec_mergeh(vf, vf);
248 // shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000249 // CHECK-ASM: vmrhf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000250 vd = vec_mergeh(vd, vd);
251 // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000252 // CHECK-ASM: vmrhg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000253
254 vf = vec_mergel(vf, vf);
255 // shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <i32 2, i32 6, i32 3, i32 7>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000256 // CHECK-ASM: vmrlf
Ulrich Weigand6af25592017-07-17 17:47:35 +0000257 vd = vec_mergel(vd, vd);
258 // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <i32 1, i32 3>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000259 // CHECK-ASM: vmrlg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000260}
261
262void test_compare(void) {
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000263 // CHECK-ASM-LABEL: test_compare
264
Ulrich Weigand6af25592017-07-17 17:47:35 +0000265 vbi = vec_cmpeq(vf, vf);
266 // CHECK: fcmp oeq <4 x float> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000267 // CHECK-ASM: vfcesb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000268 vbl = vec_cmpeq(vd, vd);
269 // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000270 // CHECK-ASM: vfcedb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000271
272 vbi = vec_cmpge(vf, vf);
273 // CHECK: fcmp oge <4 x float> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000274 // CHECK-ASM: vfchesb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000275 vbl = vec_cmpge(vd, vd);
276 // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000277 // CHECK-ASM: vfchedb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000278
279 vbi = vec_cmpgt(vf, vf);
280 // CHECK: fcmp ogt <4 x float> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000281 // CHECK-ASM: vfchsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000282 vbl = vec_cmpgt(vd, vd);
283 // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000284 // CHECK-ASM: vfchdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000285
286 vbi = vec_cmple(vf, vf);
287 // CHECK: fcmp ole <4 x float> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000288 // CHECK-ASM: vfchesb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000289 vbl = vec_cmple(vd, vd);
290 // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000291 // CHECK-ASM: vfchedb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000292
293 vbi = vec_cmplt(vf, vf);
294 // CHECK: fcmp olt <4 x float> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000295 // CHECK-ASM: vfchsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000296 vbl = vec_cmplt(vd, vd);
297 // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}}
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000298 // CHECK-ASM: vfchdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000299
300 idx = vec_all_eq(vf, vf);
301 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000302 // CHECK-ASM: vfcesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000303 idx = vec_all_eq(vd, vd);
304 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000305 // CHECK-ASM: vfcedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000306
307 idx = vec_all_ne(vf, vf);
308 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000309 // CHECK-ASM: vfcesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000310 idx = vec_all_ne(vd, vd);
311 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000312 // CHECK-ASM: vfcedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000313
314 idx = vec_all_ge(vf, vf);
315 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000316 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000317 idx = vec_all_ge(vd, vd);
318 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000319 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000320
321 idx = vec_all_gt(vf, vf);
322 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000323 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000324 idx = vec_all_gt(vd, vd);
325 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000326 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000327
328 idx = vec_all_le(vf, vf);
329 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000330 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000331 idx = vec_all_le(vd, vd);
332 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000333 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000334
335 idx = vec_all_lt(vf, vf);
336 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000337 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000338 idx = vec_all_lt(vd, vd);
339 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000340 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000341
Ulrich Weigand22ca9c62018-12-20 13:09:09 +0000342 idx = vec_all_nge(vf, vf);
343 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000344 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000345 idx = vec_all_nge(vd, vd);
346 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000347 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000348
349 idx = vec_all_ngt(vf, vf);
350 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000351 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000352 idx = vec_all_ngt(vd, vd);
353 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000354 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000355
356 idx = vec_all_nle(vf, vf);
357 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000358 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000359 idx = vec_all_nle(vd, vd);
360 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000361 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000362
363 idx = vec_all_nlt(vf, vf);
364 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000365 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000366 idx = vec_all_nlt(vd, vd);
367 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000368 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000369
370 idx = vec_all_nan(vf);
371 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000372 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000373 idx = vec_all_nan(vd);
374 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000375 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000376
377 idx = vec_all_numeric(vf);
378 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000379 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000380 idx = vec_all_numeric(vd);
381 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000382 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000383
384 idx = vec_any_eq(vf, vf);
385 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000386 // CHECK-ASM: vfcesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000387 idx = vec_any_eq(vd, vd);
388 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000389 // CHECK-ASM: vfcedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000390
391 idx = vec_any_ne(vf, vf);
392 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000393 // CHECK-ASM: vfcesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000394 idx = vec_any_ne(vd, vd);
395 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000396 // CHECK-ASM: vfcedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000397
398 idx = vec_any_ge(vf, vf);
399 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000400 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000401 idx = vec_any_ge(vd, vd);
402 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000403 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000404
405 idx = vec_any_gt(vf, vf);
406 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000407 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000408 idx = vec_any_gt(vd, vd);
409 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000410 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000411
412 idx = vec_any_le(vf, vf);
413 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000414 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000415 idx = vec_any_le(vd, vd);
416 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000417 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000418
419 idx = vec_any_lt(vf, vf);
420 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000421 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000422 idx = vec_any_lt(vd, vd);
423 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000424 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000425
426 idx = vec_any_nge(vf, vf);
427 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000428 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000429 idx = vec_any_nge(vd, vd);
430 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000431 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000432
433 idx = vec_any_ngt(vf, vf);
434 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000435 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000436 idx = vec_any_ngt(vd, vd);
437 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000438 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000439
440 idx = vec_any_nle(vf, vf);
441 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000442 // CHECK-ASM: vfchesbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000443 idx = vec_any_nle(vd, vd);
444 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000445 // CHECK-ASM: vfchedbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000446
447 idx = vec_any_nlt(vf, vf);
448 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000449 // CHECK-ASM: vfchsbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000450 idx = vec_any_nlt(vd, vd);
451 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000452 // CHECK-ASM: vfchdbs
Ulrich Weigand6af25592017-07-17 17:47:35 +0000453
454 idx = vec_any_nan(vf);
455 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000456 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000457 idx = vec_any_nan(vd);
458 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000459 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000460
461 idx = vec_any_numeric(vf);
462 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000463 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000464 idx = vec_any_numeric(vd);
465 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000466 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000467}
468
469void test_integer(void) {
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000470 // CHECK-ASM-LABEL: test_integer
471
Ulrich Weigand6af25592017-07-17 17:47:35 +0000472 vf = vec_andc(vf, vf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000473 // CHECK-ASM: vnc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000474 vd = vec_andc(vd, vd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000475 // CHECK-ASM: vnc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000476
477 vf = vec_nor(vf, vf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000478 // CHECK-ASM: vno
Ulrich Weigand6af25592017-07-17 17:47:35 +0000479 vd = vec_nor(vd, vd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000480 // CHECK-ASM: vno
Ulrich Weigand6af25592017-07-17 17:47:35 +0000481
482 vsc = vec_nand(vsc, vsc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000483 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000484 vuc = vec_nand(vuc, vuc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000485 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000486 vbc = vec_nand(vbc, vbc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000487 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000488 vss = vec_nand(vss, vss);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000489 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000490 vus = vec_nand(vus, vus);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000491 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000492 vbs = vec_nand(vbs, vbs);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000493 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000494 vsi = vec_nand(vsi, vsi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000495 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000496 vui = vec_nand(vui, vui);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000497 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000498 vbi = vec_nand(vbi, vbi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000499 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000500 vsl = vec_nand(vsl, vsl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000501 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000502 vul = vec_nand(vul, vul);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000503 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000504 vbl = vec_nand(vbl, vbl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000505 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000506 vf = vec_nand(vf, vf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000507 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000508 vd = vec_nand(vd, vd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000509 // CHECK-ASM: vnn
Ulrich Weigand6af25592017-07-17 17:47:35 +0000510
511 vsc = vec_orc(vsc, vsc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000512 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000513 vuc = vec_orc(vuc, vuc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000514 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000515 vbc = vec_orc(vbc, vbc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000516 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000517 vss = vec_orc(vss, vss);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000518 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000519 vus = vec_orc(vus, vus);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000520 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000521 vbs = vec_orc(vbs, vbs);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000522 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000523 vsi = vec_orc(vsi, vsi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000524 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000525 vui = vec_orc(vui, vui);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000526 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000527 vbi = vec_orc(vbi, vbi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000528 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000529 vsl = vec_orc(vsl, vsl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000530 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000531 vul = vec_orc(vul, vul);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000532 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000533 vbl = vec_orc(vbl, vbl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000534 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000535 vf = vec_orc(vf, vf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000536 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000537 vd = vec_orc(vd, vd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000538 // CHECK-ASM: voc
Ulrich Weigand6af25592017-07-17 17:47:35 +0000539
540 vsc = vec_eqv(vsc, vsc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000541 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000542 vuc = vec_eqv(vuc, vuc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000543 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000544 vbc = vec_eqv(vbc, vbc);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000545 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000546 vss = vec_eqv(vss, vss);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000547 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000548 vus = vec_eqv(vus, vus);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000549 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000550 vbs = vec_eqv(vbs, vbs);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000551 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000552 vsi = vec_eqv(vsi, vsi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000553 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000554 vui = vec_eqv(vui, vui);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000555 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000556 vbi = vec_eqv(vbi, vbi);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000557 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000558 vsl = vec_eqv(vsl, vsl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000559 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000560 vul = vec_eqv(vul, vul);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000561 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000562 vbl = vec_eqv(vbl, vbl);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000563 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000564 vf = vec_eqv(vf, vf);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000565 // CHECK-ASM: vnx
Ulrich Weigand6af25592017-07-17 17:47:35 +0000566 vd = vec_eqv(vd, vd);
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000567 // CHECK-ASM: vnx
568
569 vuc = vec_popcnt(vsc);
570 // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
571 // CHECK-ASM: vpopctb
572 vuc = vec_popcnt(vuc);
573 // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
574 // CHECK-ASM: vpopctb
575 vus = vec_popcnt(vss);
576 // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
577 // CHECK-ASM: vpopcth
578 vus = vec_popcnt(vus);
579 // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
580 // CHECK-ASM: vpopcth
581 vui = vec_popcnt(vsi);
582 // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
583 // CHECK-ASM: vpopctf
584 vui = vec_popcnt(vui);
585 // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
586 // CHECK-ASM: vpopctf
587 vul = vec_popcnt(vsl);
588 // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
589 // CHECK-ASM: vpopctg
590 vul = vec_popcnt(vul);
591 // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
592 // CHECK-ASM: vpopctg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000593
594 vf = vec_slb(vf, vsi);
595 // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000596 // CHECK-ASM: vslb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000597 vf = vec_slb(vf, vui);
598 // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000599 // CHECK-ASM: vslb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000600 vd = vec_slb(vd, vsl);
601 // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000602 // CHECK-ASM: vslb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000603 vd = vec_slb(vd, vul);
604 // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000605 // CHECK-ASM: vslb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000606
607 vf = vec_sld(vf, vf, 0);
608 // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000609 // CHECK-ASM: vsldb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000610 vf = vec_sld(vf, vf, 15);
611 // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000612 // CHECK-ASM: vsldb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000613 vd = vec_sld(vd, vd, 0);
614 // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000615 // CHECK-ASM: vsldb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000616 vd = vec_sld(vd, vd, 15);
617 // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000618 // CHECK-ASM: vsldb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000619
620 vf = vec_srab(vf, vsi);
621 // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000622 // CHECK-ASM: vsrab
Ulrich Weigand6af25592017-07-17 17:47:35 +0000623 vf = vec_srab(vf, vui);
624 // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000625 // CHECK-ASM: vsrab
Ulrich Weigand6af25592017-07-17 17:47:35 +0000626 vd = vec_srab(vd, vsl);
627 // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000628 // CHECK-ASM: vsrab
Ulrich Weigand6af25592017-07-17 17:47:35 +0000629 vd = vec_srab(vd, vul);
630 // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000631 // CHECK-ASM: vsrab
Ulrich Weigand6af25592017-07-17 17:47:35 +0000632
633 vf = vec_srb(vf, vsi);
634 // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000635 // CHECK-ASM: vsrlb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000636 vf = vec_srb(vf, vui);
637 // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000638 // CHECK-ASM: vsrlb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000639 vd = vec_srb(vd, vsl);
640 // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000641 // CHECK-ASM: vsrlb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000642 vd = vec_srb(vd, vul);
643 // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000644 // CHECK-ASM: vsrlb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000645
646 idx = vec_test_mask(vf, vui);
647 // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000648 // CHECK-ASM: vtm
Ulrich Weigand6af25592017-07-17 17:47:35 +0000649 idx = vec_test_mask(vd, vul);
650 // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000651 // CHECK-ASM: vtm
Ulrich Weigand6af25592017-07-17 17:47:35 +0000652
653 vuc = vec_msum_u128(vul, vul, vuc, 0);
654 // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000655 // CHECK-ASM: vmslg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000656 vuc = vec_msum_u128(vul, vul, vuc, 4);
657 // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000658 // CHECK-ASM: vmslg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000659 vuc = vec_msum_u128(vul, vul, vuc, 8);
660 // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000661 // CHECK-ASM: vmslg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000662 vuc = vec_msum_u128(vul, vul, vuc, 12);
663 // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000664 // CHECK-ASM: vmslg
Ulrich Weigand6af25592017-07-17 17:47:35 +0000665}
666
// Exercises the clang zvector (vec_*) floating-point built-ins for
// vector float and vector double: abs/nabs, max/min, the fused
// multiply-add family (madd/msub/nmadd/nmsub), sqrt, widening/narrowing
// and int<->fp conversions, the rounding variants (roundp/ceil,
// roundm/floor, roundz/trunc, roundc, rint, round), and
// vec_fp_test_data_class.  Per the RUN lines at the top of the file,
// each call is verified twice: CHECK patterns match the LLVM IR clang
// emits (-emit-llvm), and CHECK-ASM patterns match the SystemZ
// mnemonic selected for it (-S).
// NOTE(review): this view carries git-blame annotations fused into the
// lines; the underlying test statements are deliberately left untouched,
// since FileCheck depends on their exact content and order.
667void test_float(void) {
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000668 // CHECK-ASM-LABEL: test_float
669
Ulrich Weigand6af25592017-07-17 17:47:35 +0000670 vf = vec_abs(vf);
671 // CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000672 // CHECK-ASM: vflpsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000673 vd = vec_abs(vd);
674 // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000675 // CHECK-ASM: vflpdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000676
677 vf = vec_nabs(vf);
678 // CHECK: [[ABS:%[^ ]+]] = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
679 // CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[ABS]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000680 // CHECK-ASM: vflnsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000681 vd = vec_nabs(vd);
682 // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
683 // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[ABS]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000684 // CHECK-ASM: vflndb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000685
686 vf = vec_max(vf, vf);
687 // CHECK: call <4 x float> @llvm.s390.vfmaxsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000688 // CHECK-ASM: vfmaxsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000689 vd = vec_max(vd, vd);
690 // CHECK: call <2 x double> @llvm.s390.vfmaxdb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000691 // CHECK-ASM: vfmaxdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000692
693 vf = vec_min(vf, vf);
694 // CHECK: call <4 x float> @llvm.s390.vfminsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000695 // CHECK-ASM: vfminsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000696 vd = vec_min(vd, vd);
697 // CHECK: call <2 x double> @llvm.s390.vfmindb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000698 // CHECK-ASM: vfmindb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000699
700 vf = vec_madd(vf, vf, vf);
701 // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000702 // CHECK-ASM: vfmasb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000703 vd = vec_madd(vd, vd, vd);
704 // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000705 // CHECK-ASM: vfmadb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000706
707 vf = vec_msub(vf, vf, vf);
708 // CHECK: [[NEG:%[^ ]+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
709 // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000710 // CHECK-ASM: vfmssb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000711 vd = vec_msub(vd, vd, vd);
712 // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
713 // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000714 // CHECK-ASM: vfmsdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000715
716 vf = vec_nmadd(vf, vf, vf);
717 // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
718 // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[RES]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000719 // CHECK-ASM: vfnmasb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000720 vd = vec_nmadd(vd, vd, vd);
721 // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
722 // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[RES]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000723 // CHECK-ASM: vfnmadb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000724
725 vf = vec_nmsub(vf, vf, vf);
726 // CHECK: [[NEG:%[^ ]+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
727 // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
728 // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[RES]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000729 // CHECK-ASM: vfnmssb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000730 vd = vec_nmsub(vd, vd, vd);
731 // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
732 // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
733 // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[RES]]
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000734 // CHECK-ASM: vfnmsdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000735
736 vf = vec_sqrt(vf);
737 // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000738 // CHECK-ASM: vfsqsb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000739 vd = vec_sqrt(vd);
740 // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000741 // CHECK-ASM: vfsqdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000742
743 vd = vec_doublee(vf);
744 // CHECK: fpext <2 x float> %{{.*}} to <2 x double>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000745 // CHECK-ASM: vldeb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000746 vf = vec_floate(vd);
747 // CHECK: fptrunc <2 x double> %{{.*}} to <2 x float>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000748 // CHECK-ASM: vledb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000749
750 vd = vec_double(vsl);
751 // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000752 // CHECK-ASM: vcdgb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000753 vd = vec_double(vul);
754 // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000755 // CHECK-ASM: vcdlgb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000756
757 vsl = vec_signed(vd);
758 // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000759 // CHECK-ASM: vcgdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000760 vul = vec_unsigned(vd);
761 // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000762 // CHECK-ASM: vclgdb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000763
764 vf = vec_roundp(vf);
765 // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000766 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 6
Ulrich Weigand6af25592017-07-17 17:47:35 +0000767 vf = vec_ceil(vf);
768 // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000769 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 6
Ulrich Weigand6af25592017-07-17 17:47:35 +0000770 vd = vec_roundp(vd);
771 // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000772 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
Ulrich Weigand6af25592017-07-17 17:47:35 +0000773 vd = vec_ceil(vd);
774 // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000775 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
Ulrich Weigand6af25592017-07-17 17:47:35 +0000776
777 vf = vec_roundm(vf);
778 // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000779 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 7
Ulrich Weigand6af25592017-07-17 17:47:35 +0000780 vf = vec_floor(vf);
781 // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000782 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 7
Ulrich Weigand6af25592017-07-17 17:47:35 +0000783 vd = vec_roundm(vd);
784 // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000785 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
Ulrich Weigand6af25592017-07-17 17:47:35 +0000786 vd = vec_floor(vd);
787 // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000788 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
Ulrich Weigand6af25592017-07-17 17:47:35 +0000789
790 vf = vec_roundz(vf);
791 // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000792 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
Ulrich Weigand6af25592017-07-17 17:47:35 +0000793 vf = vec_trunc(vf);
794 // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000795 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
Ulrich Weigand6af25592017-07-17 17:47:35 +0000796 vd = vec_roundz(vd);
797 // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000798 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
Ulrich Weigand6af25592017-07-17 17:47:35 +0000799 vd = vec_trunc(vd);
800 // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000801 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
Ulrich Weigand6af25592017-07-17 17:47:35 +0000802
803 vf = vec_roundc(vf);
804 // CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000805 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000806 vd = vec_roundc(vd);
807 // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000808 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000809
810 vf = vec_rint(vf);
811 // CHECK: call <4 x float> @llvm.rint.v4f32(<4 x float> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000812 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 0, 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000813 vd = vec_rint(vd);
814 // CHECK: call <2 x double> @llvm.rint.v2f64(<2 x double> %{{.*}})
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000815 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
Ulrich Weigand6af25592017-07-17 17:47:35 +0000816
817 vf = vec_round(vf);
818 // CHECK: call <4 x float> @llvm.s390.vfisb(<4 x float> %{{.*}}, i32 4, i32 4)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000819 // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 4
Ulrich Weigand6af25592017-07-17 17:47:35 +0000820 vd = vec_round(vd);
821 // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000822 // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 4
Ulrich Weigand6af25592017-07-17 17:47:35 +0000823
824 vbi = vec_fp_test_data_class(vf, 0, &cc);
825 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000826 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000827 vbi = vec_fp_test_data_class(vf, 4095, &cc);
828 // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 4095)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000829 // CHECK-ASM: vftcisb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000830 vbl = vec_fp_test_data_class(vd, 0, &cc);
831 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000832 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000833 vbl = vec_fp_test_data_class(vd, 4095, &cc);
834 // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
Ulrich Weigand8ff7fec2018-12-20 13:10:47 +0000835 // CHECK-ASM: vftcidb
Ulrich Weigand6af25592017-07-17 17:47:35 +0000836}