// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
// RUN:   -O -fzvector -fno-lax-vector-conversions \
// RUN:   -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
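
// Most of the checks below exercise the single-precision (vector float)
// intrinsic variants that became available with the z14 vector facility,
// alongside their existing vector double counterparts.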

#include <vecintrin.h>

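// All operands live in volatile globals so that each intrinsic call below
// is compiled to its own loads and stores and the CHECK lines match in
// source order.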
volatile vector signed char vsc;
volatile vector signed short vss;
volatile vector signed int vsi;
volatile vector signed long long vsl;
volatile vector unsigned char vuc;
volatile vector unsigned short vus;
volatile vector unsigned int vui;
volatile vector unsigned long long vul;
volatile vector bool char vbc;
volatile vector bool short vbs;
volatile vector bool int vbi;
volatile vector bool long long vbl;
volatile vector float vf;
volatile vector double vd;

volatile signed char sc;
volatile signed short ss;
volatile signed int si;
volatile signed long long sl;
volatile unsigned char uc;
volatile unsigned short us;
volatile unsigned int ui;
volatile unsigned long long ul;
volatile float f;
volatile double d;

const void * volatile cptr;
const signed char * volatile cptrsc;
const signed short * volatile cptrss;
const signed int * volatile cptrsi;
const signed long long * volatile cptrsl;
const unsigned char * volatile cptruc;
const unsigned short * volatile cptrus;
const unsigned int * volatile cptrui;
const unsigned long long * volatile cptrul;
const float * volatile cptrf;
const double * volatile cptrd;

void * volatile ptr;
signed char * volatile ptrsc;
signed short * volatile ptrss;
signed int * volatile ptrsi;
signed long long * volatile ptrsl;
unsigned char * volatile ptruc;
unsigned short * volatile ptrus;
unsigned int * volatile ptrui;
unsigned long long * volatile ptrul;
float * volatile ptrf;
double * volatile ptrd;

volatile unsigned int len;
volatile int idx;
int cc;

void test_core(void) {
  f = vec_extract(vf, idx);
  // CHECK: extractelement <4 x float> %{{.*}}, i32 %{{.*}}
  d = vec_extract(vd, idx);
  // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}

  vf = vec_insert(d, vf, idx);
  // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 %{{.*}}
  vd = vec_insert(f, vd, idx);
  // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}

  vf = vec_promote(f, idx);
  // CHECK: insertelement <4 x float> undef, float %{{.*}}, i32 %{{.*}}
  vd = vec_promote(d, idx);
  // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}

  vf = vec_insert_and_zero(cptrf);
  // CHECK: insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %{{.*}}, i32 0
  vd = vec_insert_and_zero(cptrd);
  // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0

  vf = vec_perm(vf, vf, vuc);
  // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_perm(vd, vd, vuc);
  // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})

  vul = vec_bperm_u128(vuc, vuc);
  // CHECK: call <2 x i64> @llvm.s390.vbperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})

  vf = vec_sel(vf, vf, vui);
  vf = vec_sel(vf, vf, vbi);
  vd = vec_sel(vd, vd, vul);
  vd = vec_sel(vd, vd, vbl);

  vf = vec_gather_element(vf, vui, cptrf, 0);
  vf = vec_gather_element(vf, vui, cptrf, 1);
  vf = vec_gather_element(vf, vui, cptrf, 2);
  vf = vec_gather_element(vf, vui, cptrf, 3);
  vd = vec_gather_element(vd, vul, cptrd, 0);
  vd = vec_gather_element(vd, vul, cptrd, 1);

  vec_scatter_element(vf, vui, ptrf, 0);
  vec_scatter_element(vf, vui, ptrf, 1);
  vec_scatter_element(vf, vui, ptrf, 2);
  vec_scatter_element(vf, vui, ptrf, 3);
  vec_scatter_element(vd, vul, ptrd, 0);
  vec_scatter_element(vd, vul, ptrd, 1);

  vf = vec_xl(idx, cptrf);
  vd = vec_xl(idx, cptrd);

  vec_xst(vf, idx, ptrf);
  vec_xst(vd, idx, ptrd);

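  // The boundary argument of vec_load_bndry is a power of two between 64 and
  // 4096; judging by the checks below it maps to the vlbb immediate as
  // log2(boundary) - 6, i.e. 64 -> 0 through 4096 -> 6.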
  vd = vec_load_bndry(cptrd, 64);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
  vf = vec_load_bndry(cptrf, 64);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
  vf = vec_load_bndry(cptrf, 128);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1)
  vf = vec_load_bndry(cptrf, 256);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2)
  vf = vec_load_bndry(cptrf, 512);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3)
  vf = vec_load_bndry(cptrf, 1024);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4)
  vf = vec_load_bndry(cptrf, 2048);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5)
  vf = vec_load_bndry(cptrf, 4096);
  // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6)

  vf = vec_load_len(cptrf, idx);
  // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
  vd = vec_load_len(cptrd, idx);
  // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})

  vec_store_len(vf, ptrf, idx);
  // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
  vec_store_len(vd, ptrd, idx);
  // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})

  vuc = vec_load_len_r(cptruc, idx);
  // CHECK: call <16 x i8> @llvm.s390.vlrl(i32 %{{.*}}, i8* %{{.*}})

  vec_store_len_r(vuc, ptruc, idx);
  // CHECK: call void @llvm.s390.vstrl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})

  vf = vec_splat(vf, 0);
  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
  vf = vec_splat(vf, 1);
  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  vd = vec_splat(vd, 0);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
  vd = vec_splat(vd, 1);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>

  vf = vec_splats(f);
  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
  vd = vec_splats(d);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer

  vf = vec_mergeh(vf, vf);
  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  vd = vec_mergeh(vd, vd);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>

  vf = vec_mergel(vf, vf);
  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
  vd = vec_mergel(vd, vd);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
}

void test_compare(void) {
  vbi = vec_cmpeq(vf, vf);
  // CHECK: fcmp oeq <4 x float> %{{.*}}, %{{.*}}
  vbl = vec_cmpeq(vd, vd);
  // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}

  vbi = vec_cmpge(vf, vf);
  // CHECK: fcmp oge <4 x float> %{{.*}}, %{{.*}}
  vbl = vec_cmpge(vd, vd);
  // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}}

  vbi = vec_cmpgt(vf, vf);
  // CHECK: fcmp ogt <4 x float> %{{.*}}, %{{.*}}
  vbl = vec_cmpgt(vd, vd);
  // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}}

  vbi = vec_cmple(vf, vf);
  // CHECK: fcmp ole <4 x float> %{{.*}}, %{{.*}}
  vbl = vec_cmple(vd, vd);
  // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}}

  vbi = vec_cmplt(vf, vf);
  // CHECK: fcmp olt <4 x float> %{{.*}}, %{{.*}}
  vbl = vec_cmplt(vd, vd);
  // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}}

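  // The vec_all_* / vec_any_* predicates compile to the CC-setting compare
  // intrinsics (the "s"-suffixed variants returning { vector, i32 }); the
  // scalar predicate result is then derived from the i32 condition code.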
  idx = vec_all_eq(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_eq(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_ne(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_ne(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_ge(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_ge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_gt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_gt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_le(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_le(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_lt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_lt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_nge(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_nge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_ngt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_ngt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_nle(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_nle(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_nlt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_all_nlt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_all_nan(vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
  idx = vec_all_nan(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)

  idx = vec_all_numeric(vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
  idx = vec_all_numeric(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)

  idx = vec_any_eq(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_eq(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_ne(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_ne(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_ge(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_ge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_gt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_gt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_le(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_le(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_lt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_lt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_nge(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_nge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_ngt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_ngt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_nle(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_nle(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_nlt(vf, vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
  idx = vec_any_nlt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})

  idx = vec_any_nan(vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
  idx = vec_any_nan(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)

  idx = vec_any_numeric(vf);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
  idx = vec_any_numeric(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
}

void test_integer(void) {
  vf = vec_andc(vf, vf);
  vd = vec_andc(vd, vd);

  vf = vec_nor(vf, vf);
  vd = vec_nor(vd, vd);

  vsc = vec_nand(vsc, vsc);
  vuc = vec_nand(vuc, vuc);
  vbc = vec_nand(vbc, vbc);
  vss = vec_nand(vss, vss);
  vus = vec_nand(vus, vus);
  vbs = vec_nand(vbs, vbs);
  vsi = vec_nand(vsi, vsi);
  vui = vec_nand(vui, vui);
  vbi = vec_nand(vbi, vbi);
  vsl = vec_nand(vsl, vsl);
  vul = vec_nand(vul, vul);
  vbl = vec_nand(vbl, vbl);
  vf = vec_nand(vf, vf);
  vd = vec_nand(vd, vd);

  vsc = vec_orc(vsc, vsc);
  vuc = vec_orc(vuc, vuc);
  vbc = vec_orc(vbc, vbc);
  vss = vec_orc(vss, vss);
  vus = vec_orc(vus, vus);
  vbs = vec_orc(vbs, vbs);
  vsi = vec_orc(vsi, vsi);
  vui = vec_orc(vui, vui);
  vbi = vec_orc(vbi, vbi);
  vsl = vec_orc(vsl, vsl);
  vul = vec_orc(vul, vul);
  vbl = vec_orc(vbl, vbl);
  vf = vec_orc(vf, vf);
  vd = vec_orc(vd, vd);

  vsc = vec_eqv(vsc, vsc);
  vuc = vec_eqv(vuc, vuc);
  vbc = vec_eqv(vbc, vbc);
  vss = vec_eqv(vss, vss);
  vus = vec_eqv(vus, vus);
  vbs = vec_eqv(vbs, vbs);
  vsi = vec_eqv(vsi, vsi);
  vui = vec_eqv(vui, vui);
  vbi = vec_eqv(vbi, vbi);
  vsl = vec_eqv(vsl, vsl);
  vul = vec_eqv(vul, vul);
  vbl = vec_eqv(vbl, vbl);
  vf = vec_eqv(vf, vf);
  vd = vec_eqv(vd, vd);

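  // The byte-granular shifts (vslb/vsrab/vsrlb) and vsldb operate on the
  // whole 128-bit register, so every element type is bitcast to <16 x i8>
  // and the same intrinsic is checked for all of them.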
  vf = vec_slb(vf, vsi);
  // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vf = vec_slb(vf, vui);
  // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_slb(vd, vsl);
  // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_slb(vd, vul);
  // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})

  vf = vec_sld(vf, vf, 0);
  // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
  vf = vec_sld(vf, vf, 15);
  // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
  vd = vec_sld(vd, vd, 0);
  // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
  vd = vec_sld(vd, vd, 15);
  // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)

  vf = vec_srab(vf, vsi);
  // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vf = vec_srab(vf, vui);
  // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_srab(vd, vsl);
  // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_srab(vd, vul);
  // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})

  vf = vec_srb(vf, vsi);
  // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vf = vec_srb(vf, vui);
  // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_srb(vd, vsl);
  // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  vd = vec_srb(vd, vul);
  // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})

  idx = vec_test_mask(vf, vui);
  // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
  idx = vec_test_mask(vd, vul);
  // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})

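  // The final argument of vec_msum_u128 is passed straight through as the
  // vmslg immediate; only the valid values 0, 4, 8 and 12 are exercised.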
  vuc = vec_msum_u128(vul, vul, vuc, 0);
  // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
  vuc = vec_msum_u128(vul, vul, vuc, 4);
  // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
  vuc = vec_msum_u128(vul, vul, vuc, 8);
  // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
  vuc = vec_msum_u128(vul, vul, vuc, 12);
  // CHECK: call <16 x i8> @llvm.s390.vmslg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
}

void test_float(void) {
  vf = vec_abs(vf);
  // CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
  vd = vec_abs(vd);
  // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})

  vf = vec_nabs(vf);
  // CHECK: [[ABS:%[^ ]+]] = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
  // CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[ABS]]
  vd = vec_nabs(vd);
  // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
  // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[ABS]]

  vf = vec_max(vf, vf);
  // CHECK: call <4 x float> @llvm.s390.vfmaxsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
  vd = vec_max(vd, vd);
  // CHECK: call <2 x double> @llvm.s390.vfmaxdb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)

  vf = vec_min(vf, vf);
  // CHECK: call <4 x float> @llvm.s390.vfminsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
  vd = vec_min(vd, vd);
  // CHECK: call <2 x double> @llvm.s390.vfmindb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)

  vf = vec_madd(vf, vf, vf);
  // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  vd = vec_madd(vd, vd, vd);
  // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})

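  // Negation is expressed in the IR as an fsub from -0.0, which preserves
  // IEEE sign semantics; the checks for vec_msub and the nmadd/nmsub
  // variants below match that pattern around the fma calls.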
  vf = vec_msub(vf, vf, vf);
  // CHECK: [[NEG:%[^ ]+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
  // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
  vd = vec_msub(vd, vd, vd);
  // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
  // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])

  vf = vec_nmadd(vf, vf, vf);
  // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[RES]]
  vd = vec_nmadd(vd, vd, vd);
  // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[RES]]

  vf = vec_nmsub(vf, vf, vf);
  // CHECK: [[NEG:%[^ ]+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
  // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
  // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[RES]]
  vd = vec_nmsub(vd, vd, vd);
  // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
  // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
  // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[RES]]

  vf = vec_sqrt(vf);
  // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{.*}})
  vd = vec_sqrt(vd);
  // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})

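  // vec_doublee widens the even-indexed float elements and vec_floate
  // narrows into the even-indexed lanes, hence the <2 x float> intermediate
  // type in the conversions checked below.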
  vd = vec_doublee(vf);
  // CHECK: fpext <2 x float> %{{.*}} to <2 x double>
  vf = vec_floate(vd);
  // CHECK: fptrunc <2 x double> %{{.*}} to <2 x float>

  vd = vec_double(vsl);
  // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
  vd = vec_double(vul);
  // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>

  vsl = vec_signed(vd);
  // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
  vul = vec_unsigned(vd);
  // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>

  vf = vec_roundp(vf);
  // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{.*}})
  vf = vec_ceil(vf);
  // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{.*}})
  vd = vec_roundp(vd);
  // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
  vd = vec_ceil(vd);
  // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})

  vf = vec_roundm(vf);
  // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{.*}})
  vf = vec_floor(vf);
  // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{.*}})
  vd = vec_roundm(vd);
  // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
  vd = vec_floor(vd);
  // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})

  vf = vec_roundz(vf);
  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}})
  vf = vec_trunc(vf);
  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}})
  vd = vec_roundz(vd);
  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
  vd = vec_trunc(vd);
  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})

  vf = vec_roundc(vf);
  // CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{.*}})
  vd = vec_roundc(vd);
  // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}})

  vf = vec_rint(vf);
  // CHECK: call <4 x float> @llvm.rint.v4f32(<4 x float> %{{.*}})
  vd = vec_rint(vd);
  // CHECK: call <2 x double> @llvm.rint.v2f64(<2 x double> %{{.*}})

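  // vec_round keeps the target intrinsic form with both immediates set to 4:
  // presumably the first suppresses inexact exceptions and the second
  // selects round-to-nearest with ties away from zero.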
  vf = vec_round(vf);
  // CHECK: call <4 x float> @llvm.s390.vfisb(<4 x float> %{{.*}}, i32 4, i32 4)
  vd = vec_round(vd);
  // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)

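  // vec_fp_test_data_class takes a 12-bit class-selection mask (0..4095) and
  // returns the condition code through the int pointed to by the last
  // argument; only the mask endpoints are checked here.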
  vbi = vec_fp_test_data_class(vf, 0, &cc);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 0)
  vbi = vec_fp_test_data_class(vf, 4095, &cc);
  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 4095)
  vbl = vec_fp_test_data_class(vd, 0, &cc);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
  vbl = vec_fp_test_data_class(vd, 4095, &cc);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
}